diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index 2ecc98068..a120d8bbb 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -23,6 +23,11 @@ jobs:
strategy:
fail-fast: false
steps:
+ - name: Setup Go
+ uses: actions/setup-go@93397bea11091df50f3d7e59dc26a7711a8bcfbe # v4.1.0
+ with:
+ # renovate: datasource=golang-version depName=go
+ go-version: '1.21.3'
- name: Checkout repo
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0
with:
diff --git a/.golangci.yml b/.golangci.yml
index 157ee7cb8..71db871ab 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -3,7 +3,7 @@
# reference.
run:
go: '1.21'
- timeout: 3m # default is 1m
+ timeout: 5m # default is 1m
linters:
disable-all: true
enable:
diff --git a/cmd/observe/flows.go b/cmd/observe/flows.go
index 67a4b8a2e..bb3816298 100644
--- a/cmd/observe/flows.go
+++ b/cmd/observe/flows.go
@@ -389,6 +389,9 @@ func newFlowsCmdHelper(usage cmdUsage, vp *viper.Viper, ofilter *flowFilter) *co
filterFlags.Var(filterVar(
"http-path", ofilter,
`Show only flows which match this HTTP path regular expressions (e.g. "/page/\\d+")`))
+ filterFlags.Var(filterVar(
+ "http-url", ofilter,
+ `Show only flows which match this HTTP URL regular expressions (e.g. "http://.*cilium\.io/page/\\d+")`))
filterFlags.Var(filterVar(
"trace-id", ofilter,
diff --git a/cmd/observe/flows_filter.go b/cmd/observe/flows_filter.go
index 5b28077f8..1b0bbd04a 100644
--- a/cmd/observe/flows_filter.go
+++ b/cmd/observe/flows_filter.go
@@ -119,6 +119,7 @@ func newFlowFilter() *flowFilter {
{"http-status"},
{"http-method"},
{"http-path"},
+ {"http-url"},
{"protocol"},
{"port", "to-port"},
{"port", "from-port"},
@@ -435,6 +436,10 @@ func (of *flowFilter) set(f *filterTracker, name, val string, track bool) error
f.apply(func(f *flowpb.FlowFilter) {
f.HttpPath = append(f.HttpPath, val)
})
+ case "http-url":
+ f.apply(func(f *flowpb.FlowFilter) {
+ f.HttpUrl = append(f.HttpUrl, val)
+ })
case "type":
if wipe {
diff --git a/cmd/observe/flows_filter_test.go b/cmd/observe/flows_filter_test.go
index 7af611c1d..6955eea6c 100644
--- a/cmd/observe/flows_filter_test.go
+++ b/cmd/observe/flows_filter_test.go
@@ -575,3 +575,20 @@ func TestTrafficDirection(t *testing.T) {
})
}
}
+
+func TestHTTPURL(t *testing.T) {
+ f := newFlowFilter()
+ cmd := newFlowsCmdWithFilter(viper.New(), f)
+
+ require.NoError(t, cmd.Flags().Parse([]string{"--http-url", `http://.*cilium\.io/foo`, "--http-url", `http://www\.cilium\.io/bar`}))
+ if diff := cmp.Diff(
+ []*flowpb.FlowFilter{
+ {HttpUrl: []string{`http://.*cilium\.io/foo`, `http://www\.cilium\.io/bar`}},
+ },
+ f.whitelist.flowFilters(),
+ cmpopts.IgnoreUnexported(flowpb.FlowFilter{}),
+ ); diff != "" {
+ t.Errorf("mismatch (-want +got):\n%s", diff)
+ }
+ assert.Nil(t, f.blacklist)
+}
diff --git a/cmd/observe_help.txt b/cmd/observe_help.txt
index cebe02d01..55da75d8e 100644
--- a/cmd/observe_help.txt
+++ b/cmd/observe_help.txt
@@ -51,6 +51,7 @@ Filters Flags:
--http-method filter Show only flows which match this HTTP method (e.g. "get", "post")
--http-path filter Show only flows which match this HTTP path regular expressions (e.g. "/page/\\d+")
--http-status filter Show only flows which match this HTTP status code prefix (e.g. "404", "5+")
+ --http-url filter Show only flows which match this HTTP URL regular expressions (e.g. "http://.*cilium\.io/page/\\d+")
--identity filter Show all flows related to an endpoint with the given security identity
--ip filter Show all flows related to the given IP address. Each of the IPs can be specified as an exact match (e.g. '1.1.1.1') or as a CIDR range (e.g.'1.1.1.0/24').
--ip-version filter Show only IPv4, IPv6 flows or non IP flows (e.g. ARP packets) (ie: "none", "v4", "v6")
diff --git a/go.mod b/go.mod
index 00852a7de..d8c478810 100644
--- a/go.mod
+++ b/go.mod
@@ -1,9 +1,9 @@
module github.com/cilium/hubble
-go 1.21
+go 1.21.3
require (
- github.com/cilium/cilium v1.14.0-snapshot.5
+ github.com/cilium/cilium v1.15.0-pre.1.0.20231016085253-84f5b169c565
github.com/fatih/color v1.15.0
github.com/google/go-cmp v0.6.0
github.com/spf13/cast v1.5.1
@@ -18,41 +18,77 @@ require (
)
require (
+ github.com/armon/go-metrics v0.4.1 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
+ github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
+ github.com/cespare/xxhash/v2 v2.2.0 // indirect
+ github.com/cilium/ebpf v0.12.0 // indirect
+ github.com/cilium/proxy v0.0.0-20231011084420-05c54001182b // indirect
+ github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 // indirect
+ github.com/coreos/go-semver v0.3.1 // indirect
+ github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
+ github.com/emicklei/go-restful/v3 v3.10.2 // indirect
+ github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect
+ github.com/evanphx/json-patch v5.7.0+incompatible // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/go-logr/logr v1.2.4 // indirect
+ github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-openapi/analysis v0.21.4 // indirect
github.com/go-openapi/errors v0.20.4 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
- github.com/go-openapi/jsonreference v0.20.1 // indirect
+ github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/loads v0.21.2 // indirect
+ github.com/go-openapi/runtime v0.26.0 // indirect
github.com/go-openapi/spec v0.20.9 // indirect
github.com/go-openapi/strfmt v0.21.7 // indirect
github.com/go-openapi/swag v0.22.4 // indirect
github.com/go-openapi/validate v0.22.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
+ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
+ github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/gofuzz v1.2.0 // indirect
+ github.com/google/uuid v1.3.1 // indirect
+ github.com/hashicorp/consul/api v1.25.1 // indirect
+ github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
+ github.com/hashicorp/go-hclog v1.5.0 // indirect
+ github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
+ github.com/hashicorp/go-rootcerts v1.0.2 // indirect
+ github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
+ github.com/hashicorp/serf v0.10.1 // indirect
+ github.com/imdario/mergo v0.3.12 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
+ github.com/kr/pretty v0.3.1 // indirect
+ github.com/kr/text v0.2.0 // indirect
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.17 // indirect
+ github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
+ github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
+ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/oklog/ulid v1.3.1 // indirect
+ github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b // indirect
github.com/pelletier/go-toml/v2 v2.1.0 // indirect
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect
+ github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
+ github.com/prometheus/client_golang v1.17.0 // indirect
+ github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect
+ github.com/prometheus/common v0.44.0 // indirect
+ github.com/prometheus/procfs v0.12.0 // indirect
+ github.com/rogpeppe/go-internal v1.10.0 // indirect
github.com/sagikazarmark/locafero v0.3.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/sasha-s/go-deadlock v0.3.1 // indirect
@@ -63,32 +99,47 @@ require (
github.com/subosito/gotenv v1.6.0 // indirect
github.com/tklauser/go-sysconf v0.3.11 // indirect
github.com/tklauser/numcpus v0.6.0 // indirect
- github.com/vishvananda/netlink v1.2.1-beta.2.0.20230621221334-77712cff8739 // indirect
+ github.com/vishvananda/netlink v1.2.1-beta.2.0.20230807190133-6afddb37c1f0 // indirect
github.com/vishvananda/netns v0.0.4 // indirect
github.com/yusufpapurcu/wmi v1.2.3 // indirect
+ go.etcd.io/etcd/api/v3 v3.5.9 // indirect
+ go.etcd.io/etcd/client/pkg/v3 v3.5.9 // indirect
+ go.etcd.io/etcd/client/v3 v3.5.9 // indirect
go.mongodb.org/mongo-driver v1.11.3 // indirect
+ go.opentelemetry.io/otel v1.19.0 // indirect
+ go.opentelemetry.io/otel/metric v1.19.0 // indirect
+ go.opentelemetry.io/otel/trace v1.19.0 // indirect
+ go.uber.org/dig v1.17.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
+ go.uber.org/zap v1.26.0 // indirect
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
golang.org/x/net v0.17.0 // indirect
+ golang.org/x/oauth2 v0.12.0 // indirect
golang.org/x/sync v0.3.0 // indirect
+ golang.org/x/term v0.13.0 // indirect
golang.org/x/text v0.13.0 // indirect
golang.org/x/time v0.3.0 // indirect
+ google.golang.org/appengine v1.6.7 // indirect
+ google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
- k8s.io/apimachinery v0.27.2 // indirect
- k8s.io/client-go v0.27.2 // indirect
+ k8s.io/api v0.28.1 // indirect
+ k8s.io/apiextensions-apiserver v0.28.1 // indirect
+ k8s.io/apimachinery v0.28.1 // indirect
+ k8s.io/client-go v0.28.1 // indirect
k8s.io/klog/v2 v2.100.1 // indirect
- k8s.io/utils v0.0.0-20230209194617-a36077c30491 // indirect
+ k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect
+ k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
+ sigs.k8s.io/yaml v1.3.0 // indirect
)
// Replace directives from github.com/cilium/cilium. Keep in sync when updating Cilium!
replace (
- github.com/miekg/dns => github.com/cilium/dns v1.1.51-0.20220729113855-5b94b11b46fc
go.universe.tf/metallb => github.com/cilium/metallb v0.1.1-0.20220829170633-5d7dfb1129f7
- k8s.io/client-go => github.com/cilium/client-go v0.27.2-fix
- sigs.k8s.io/controller-tools => github.com/cilium/controller-tools v0.6.2
+ sigs.k8s.io/controller-tools => github.com/cilium/controller-tools v0.8.0-1
)
diff --git a/go.sum b/go.sum
index 39a63cf46..89d1a549c 100644
--- a/go.sum
+++ b/go.sum
@@ -36,18 +36,37 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
+github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g=
+github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
@@ -55,38 +74,60 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/cilium/checkmate v1.0.3 h1:CQC5eOmlAZeEjPrVZY3ZwEBH64lHlx9mXYdUehEwI5w=
github.com/cilium/checkmate v1.0.3/go.mod h1:KiBTasf39/F2hf2yAmHw21YFl3hcEyP4Yk6filxc12A=
-github.com/cilium/cilium v1.14.0-snapshot.5 h1:eSJxw/j0vwx9EhQA/Exdsol1sIZiG/9qhgbyAZ7eGbU=
-github.com/cilium/cilium v1.14.0-snapshot.5/go.mod h1:Kn5pNB4Mb/5mhinclwvDtWPejmPaw5k11DGMjohdc3g=
-github.com/cilium/client-go v0.27.2-fix h1:dbFQGtP5Y1lFVBhkMCIAwyf/PUWFBn8M1ZTCKA6BUdw=
-github.com/cilium/client-go v0.27.2-fix/go.mod h1:tY0gVmUsHrAmjzHX9zs7eCjxcBsf8IiNe7KQ52biTcQ=
-github.com/cilium/proxy v0.0.0-20230623092907-8fddead4e52c h1:/NqY4jLr92f7VcUJe1gHS6CgSGWFUCeD2f4QhxO8tgE=
-github.com/cilium/proxy v0.0.0-20230623092907-8fddead4e52c/go.mod h1:iOlDXIgPGBabS7J0Npbq8MC5+gfvUGSBISnxXIJjfgs=
+github.com/cilium/cilium v1.15.0-pre.1.0.20231016085253-84f5b169c565 h1:dWqfSJCGq/bE5ucGRi75wYP7W0NzLyLUeJkOAg8uAMo=
+github.com/cilium/cilium v1.15.0-pre.1.0.20231016085253-84f5b169c565/go.mod h1:ossPkzVIGRFX3GngrGUy37x3b2+ooO37GAiQc6oObDw=
+github.com/cilium/ebpf v0.12.0 h1:oQEuIQIXgYhe1v7sYUG0P9vtJTYZLLdA6tiQmrOB1mo=
+github.com/cilium/ebpf v0.12.0/go.mod h1:u9H29/Iq+8cy70YqI6p5pfADkFl3vdnV2qXDg5JL0Zo=
+github.com/cilium/proxy v0.0.0-20231011084420-05c54001182b h1:0fiW6p1B6B7HC8fG5f2e+mTiMyVpGdHh1FO7RLgNk1Q=
+github.com/cilium/proxy v0.0.0-20231011084420-05c54001182b/go.mod h1:p044XccCmONGIUbx3bJ7qvHXK0RcrdvIvbTGiu/RjUA=
+github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
+github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k=
+github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
+github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE=
+github.com/emicklei/go-restful/v3 v3.10.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
+github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
+github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI=
+github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
+github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
-github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY=
-github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
+github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA=
+github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
@@ -107,8 +148,8 @@ github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns=
github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
-github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8=
-github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
+github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
+github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g=
github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro=
github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw=
@@ -132,6 +173,8 @@ github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+
github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU=
github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
@@ -156,6 +199,8 @@ github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWe
github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
@@ -191,6 +236,10 @@ github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
+github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
+github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
+github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -221,30 +270,80 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
+github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
+github.com/hashicorp/consul/api v1.25.1 h1:CqrdhYzc8XZuPnhIYZWH45toM0LB9ZeYr/gvpLVI3PE=
+github.com/hashicorp/consul/api v1.25.1/go.mod h1:iiLVwR/htV7mas/sy0O+XSuEnrdBUUydemjxcUrAt4g=
+github.com/hashicorp/consul/sdk v0.14.1 h1:ZiwE2bKb+zro68sWzZ1SgHF3kRMBZ94TwOCFRF4ylPs=
+github.com/hashicorp/consul/sdk v0.14.1/go.mod h1:vFt03juSzocLRFo59NkeQHHmQa6+g7oU0pfzdI1mUhg=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
+github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=
+github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
+github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
+github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
+github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
+github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
+github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
+github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI=
+github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
+github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM=
+github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=
+github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
+github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
+github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
@@ -253,6 +352,7 @@ github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47e
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
@@ -272,13 +372,31 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
+github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY=
+github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
+github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
@@ -286,53 +404,86 @@ github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE=
+github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM=
+github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI=
+github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M=
github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b h1:FfH+VrHHk6Lxt9HdVS0PXzSXFyS2NbZKXv33FYPol0A=
github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b/go.mod h1:AC62GU6hc0BrNm+9RK9VSiwa/EUe1bkIeFORAMcHvJU=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
+github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ=
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
-github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
-github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
+github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
+github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
-github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
-github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
-github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
-github.com/prometheus/procfs v0.11.0 h1:5EAgkfkMl659uZPbe9AS2N68a7Cc1TJbPEuGzFuRbyk=
-github.com/prometheus/procfs v0.11.0/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM=
+github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
+github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
+github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
+github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
+github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sagikazarmark/locafero v0.3.0 h1:zT7VEGWC2DTflmccN/5T1etyKvxSxpHsjb9cJvm4SvQ=
github.com/sagikazarmark/locafero v0.3.0/go.mod h1:w+v7UsPNFwzF1cHuOajOOzoq4U7v/ig1mpRjqV+Bu1U=
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0=
github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/shirou/gopsutil/v3 v3.23.5 h1:5SgDCeQ0KW0S4N0znjeM/eFHXXOKyv2dVNgRq/c9P6Y=
github.com/shirou/gopsutil/v3 v3.23.5/go.mod h1:Ng3Maa27Q2KARVJ0SPZF5NdrQSC3XHKP8IIWrHgMeLY=
github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
@@ -356,6 +507,7 @@ github.com/spf13/viper v1.17.0/go.mod h1:BmMMMLQXSbcHK6KAOiFLz0l5JHrU89OdIRHvsk0
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@@ -364,6 +516,7 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
@@ -378,8 +531,9 @@ github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+Kd
github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms=
github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
-github.com/vishvananda/netlink v1.2.1-beta.2.0.20230621221334-77712cff8739 h1:mi+RH1U/MmAQvz2Ys7r1/8OWlGJoBvF8iCXRKk2uym4=
-github.com/vishvananda/netlink v1.2.1-beta.2.0.20230621221334-77712cff8739/go.mod h1:0BeLktV/jHb2/Hmw1yLD7+yaIB8PDy11RCty0tCPWZg=
+github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
+github.com/vishvananda/netlink v1.2.1-beta.2.0.20230807190133-6afddb37c1f0 h1:CLsXiDYQjYqJVntHkQZL2AW0R8BrvJu1K/hbs+2Q+EQ=
+github.com/vishvananda/netlink v1.2.1-beta.2.0.20230807190133-6afddb37c1f0/go.mod h1:whJevzBpTrid75eZy99s3DqCmy05NfibNaF2Ol5Ox5A=
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
@@ -395,6 +549,12 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
+go.etcd.io/etcd/api/v3 v3.5.9 h1:4wSsluwyTbGGmyjJktOf3wFQoTBIURXHnq9n/G/JQHs=
+go.etcd.io/etcd/api/v3 v3.5.9/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k=
+go.etcd.io/etcd/client/pkg/v3 v3.5.9 h1:oidDC4+YEuSIQbsR94rY9gur91UPL6DnxDCIYd2IGsE=
+go.etcd.io/etcd/client/pkg/v3 v3.5.9/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4=
+go.etcd.io/etcd/client/v3 v3.5.9 h1:r5xghnU7CwbUxD/fbUtRyJGaYNfDun8sp/gTr1hew6E=
+go.etcd.io/etcd/client/v3 v3.5.9/go.mod h1:i/Eo5LrZ5IKqpbtpPDuaUnDOUv471oDg8cjQaUr2MbA=
go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg=
go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng=
go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8=
@@ -406,21 +566,28 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
-go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s=
-go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4=
-go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo=
-go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4=
-go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs=
-go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0=
+go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs=
+go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY=
+go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE=
+go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8=
+go.opentelemetry.io/otel/sdk v1.14.0 h1:PDCppFRDq8A1jL9v6KMI6dYesaq+DFcDZvjsoGvxGzY=
+go.opentelemetry.io/otel/sdk v1.14.0/go.mod h1:bwIC5TjrNG6QDCHNWvW4HLHtUQ4I+VQDsnjhvyZCALM=
+go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg=
+go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo=
go.uber.org/dig v1.17.0 h1:5Chju+tUvcC+N7N6EV08BJz41UZuO3BmHcN4A287ZLI=
go.uber.org/dig v1.17.0/go.mod h1:rTxpf7l5I0eBTlE6/9RL+lDybC7WFwY2QH55ZSjy1mU=
+go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
+go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
+go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -464,6 +631,7 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -471,9 +639,11 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -493,6 +663,7 @@ golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
@@ -506,6 +677,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4=
+golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -520,9 +693,12 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -535,10 +711,14 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -560,16 +740,22 @@ golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.9.1-0.20230616193735-e0c3b6e6ae3b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -608,6 +794,7 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -644,6 +831,8 @@ golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ=
+golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -673,6 +862,7 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@@ -710,6 +900,10 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb h1:XFBgcDwm7irdHTbz4Zk2h7Mh+eis4nfJEFQFYzJzuIA=
+google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4=
+google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb h1:lK0oleSc7IQsUxO3U5TjL9DWlsxpEBemh+zpB7IqhWI=
+google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
@@ -744,8 +938,10 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
@@ -754,8 +950,12 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@@ -771,12 +971,20 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-k8s.io/apimachinery v0.27.2 h1:vBjGaKKieaIreI+oQwELalVG4d8f3YAMNpWLzDXkxeg=
-k8s.io/apimachinery v0.27.2/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E=
+k8s.io/api v0.28.1 h1:i+0O8k2NPBCPYaMB+uCkseEbawEt/eFaiRqUx8aB108=
+k8s.io/api v0.28.1/go.mod h1:uBYwID+66wiL28Kn2tBjBYQdEU0Xk0z5qF8bIBqk/Dg=
+k8s.io/apiextensions-apiserver v0.28.1 h1:l2ThkBRjrWpw4f24uq0Da2HaEgqJZ7pcgiEUTKSmQZw=
+k8s.io/apiextensions-apiserver v0.28.1/go.mod h1:sVvrI+P4vxh2YBBcm8n2ThjNyzU4BQGilCQ/JAY5kGs=
+k8s.io/apimachinery v0.28.1 h1:EJD40og3GizBSV3mkIoXQBsws32okPOy+MkRyzh6nPY=
+k8s.io/apimachinery v0.28.1/go.mod h1:X0xh/chESs2hP9koe+SdIAcXWcQ+RM5hy0ZynB+yEvw=
+k8s.io/client-go v0.28.1 h1:pRhMzB8HyLfVwpngWKE8hDcXRqifh1ga2Z/PU9SXVK8=
+k8s.io/client-go v0.28.1/go.mod h1:pEZA3FqOsVkCc07pFVzK076R+P/eXqsgx5zuuRWukNE=
k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg=
k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
-k8s.io/utils v0.0.0-20230209194617-a36077c30491 h1:r0BAOLElQnnFhE/ApUsg3iHdVYYPBjNSSOMowRZxxsY=
-k8s.io/utils v0.0.0-20230209194617-a36077c30491/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ=
+k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM=
+k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk=
+k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/vendor/github.com/armon/go-metrics/.gitignore b/vendor/github.com/armon/go-metrics/.gitignore
new file mode 100644
index 000000000..e5750f572
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/.gitignore
@@ -0,0 +1,26 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+/metrics.out
+
+.idea
diff --git a/vendor/github.com/armon/go-metrics/.travis.yml b/vendor/github.com/armon/go-metrics/.travis.yml
new file mode 100644
index 000000000..87d230c8d
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/.travis.yml
@@ -0,0 +1,13 @@
+language: go
+
+go:
+ - "1.x"
+
+env:
+ - GO111MODULE=on
+
+install:
+ - go get ./...
+
+script:
+ - go test ./...
diff --git a/vendor/github.com/armon/go-metrics/LICENSE b/vendor/github.com/armon/go-metrics/LICENSE
new file mode 100644
index 000000000..106569e54
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Armon Dadgar
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/armon/go-metrics/README.md b/vendor/github.com/armon/go-metrics/README.md
new file mode 100644
index 000000000..aa73348c0
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/README.md
@@ -0,0 +1,91 @@
+go-metrics
+==========
+
+This library provides a `metrics` package which can be used to instrument code,
+expose application metrics, and profile runtime performance in a flexible manner.
+
+Current API: [![GoDoc](https://godoc.org/github.com/armon/go-metrics?status.svg)](https://godoc.org/github.com/armon/go-metrics)
+
+Sinks
+-----
+
+The `metrics` package makes use of a `MetricSink` interface to support delivery
+to any type of backend. Currently the following sinks are provided:
+
+* StatsiteSink : Sinks to a [statsite](https://github.com/armon/statsite/) instance (TCP)
+* StatsdSink: Sinks to a [StatsD](https://github.com/etsy/statsd/) / statsite instance (UDP)
+* PrometheusSink: Sinks to a [Prometheus](http://prometheus.io/) metrics endpoint (exposed via HTTP for scrapes)
+* InmemSink : Provides in-memory aggregation, can be used to export stats
+* FanoutSink : Sinks to multiple sinks. Enables writing to multiple statsite instances for example.
+* BlackholeSink : Sinks to nowhere
+
+In addition to the sinks, the `InmemSignal` can be used to catch a signal,
+and dump a formatted output of recent metrics. For example, when a process gets
+a SIGUSR1, it can dump to stderr recent performance metrics for debugging.
+
+Labels
+------
+
+Most metrics do have an equivalent ending with `WithLabels`, such methods
+allow to push metrics with labels and use some features of underlying Sinks
+(ex: translated into Prometheus labels).
+
+Since some of these labels may increase greatly cardinality of metrics, the
+library allow to filter labels using a blacklist/whitelist filtering system
+which is global to all metrics.
+
+* If `Config.AllowedLabels` is not nil, then only labels specified in this value will be sent to underlying Sink, otherwise, all labels are sent by default.
+* If `Config.BlockedLabels` is not nil, any label specified in this value will not be sent to underlying Sinks.
+
+By default, both `Config.AllowedLabels` and `Config.BlockedLabels` are nil, meaning that
+no tags are filetered at all, but it allow to a user to globally block some tags with high
+cardinality at application level.
+
+Examples
+--------
+
+Here is an example of using the package:
+
+```go
+func SlowMethod() {
+ // Profiling the runtime of a method
+ defer metrics.MeasureSince([]string{"SlowMethod"}, time.Now())
+}
+
+// Configure a statsite sink as the global metrics sink
+sink, _ := metrics.NewStatsiteSink("statsite:8125")
+metrics.NewGlobal(metrics.DefaultConfig("service-name"), sink)
+
+// Emit a Key/Value pair
+metrics.EmitKey([]string{"questions", "meaning of life"}, 42)
+```
+
+Here is an example of setting up a signal handler:
+
+```go
+// Setup the inmem sink and signal handler
+inm := metrics.NewInmemSink(10*time.Second, time.Minute)
+sig := metrics.DefaultInmemSignal(inm)
+metrics.NewGlobal(metrics.DefaultConfig("service-name"), inm)
+
+// Run some code
+inm.SetGauge([]string{"foo"}, 42)
+inm.EmitKey([]string{"bar"}, 30)
+
+inm.IncrCounter([]string{"baz"}, 42)
+inm.IncrCounter([]string{"baz"}, 1)
+inm.IncrCounter([]string{"baz"}, 80)
+
+inm.AddSample([]string{"method", "wow"}, 42)
+inm.AddSample([]string{"method", "wow"}, 100)
+inm.AddSample([]string{"method", "wow"}, 22)
+
+....
+```
+
+When a signal comes in, output like the following will be dumped to stderr:
+
+ [2014-01-28 14:57:33.04 -0800 PST][G] 'foo': 42.000
+ [2014-01-28 14:57:33.04 -0800 PST][P] 'bar': 30.000
+ [2014-01-28 14:57:33.04 -0800 PST][C] 'baz': Count: 3 Min: 1.000 Mean: 41.000 Max: 80.000 Stddev: 39.509
+ [2014-01-28 14:57:33.04 -0800 PST][S] 'method.wow': Count: 3 Min: 22.000 Mean: 54.667 Max: 100.000 Stddev: 40.513
\ No newline at end of file
diff --git a/vendor/github.com/armon/go-metrics/const_unix.go b/vendor/github.com/armon/go-metrics/const_unix.go
new file mode 100644
index 000000000..31098dd57
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/const_unix.go
@@ -0,0 +1,12 @@
+// +build !windows
+
+package metrics
+
+import (
+ "syscall"
+)
+
+const (
+ // DefaultSignal is used with DefaultInmemSignal
+ DefaultSignal = syscall.SIGUSR1
+)
diff --git a/vendor/github.com/armon/go-metrics/const_windows.go b/vendor/github.com/armon/go-metrics/const_windows.go
new file mode 100644
index 000000000..38136af3e
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/const_windows.go
@@ -0,0 +1,13 @@
+// +build windows
+
+package metrics
+
+import (
+ "syscall"
+)
+
+const (
+ // DefaultSignal is used with DefaultInmemSignal
+ // Windows has no SIGUSR1, use SIGBREAK
+ DefaultSignal = syscall.Signal(21)
+)
diff --git a/vendor/github.com/armon/go-metrics/inmem.go b/vendor/github.com/armon/go-metrics/inmem.go
new file mode 100644
index 000000000..7c427aca9
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/inmem.go
@@ -0,0 +1,339 @@
+package metrics
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "net/url"
+ "strings"
+ "sync"
+ "time"
+)
+
+var spaceReplacer = strings.NewReplacer(" ", "_")
+
+// InmemSink provides a MetricSink that does in-memory aggregation
+// without sending metrics over a network. It can be embedded within
+// an application to provide profiling information.
+type InmemSink struct {
+ // How long is each aggregation interval
+ interval time.Duration
+
+ // Retain controls how many metrics interval we keep
+ retain time.Duration
+
+ // maxIntervals is the maximum length of intervals.
+ // It is retain / interval.
+ maxIntervals int
+
+ // intervals is a slice of the retained intervals
+ intervals []*IntervalMetrics
+ intervalLock sync.RWMutex
+
+ rateDenom float64
+}
+
+// IntervalMetrics stores the aggregated metrics
+// for a specific interval
+type IntervalMetrics struct {
+ sync.RWMutex
+
+ // The start time of the interval
+ Interval time.Time
+
+ // Gauges maps the key to the last set value
+ Gauges map[string]GaugeValue
+
+ // Points maps the string to the list of emitted values
+ // from EmitKey
+ Points map[string][]float32
+
+ // Counters maps the string key to a sum of the counter
+ // values
+ Counters map[string]SampledValue
+
+ // Samples maps the key to an AggregateSample,
+ // which has the rolled up view of a sample
+ Samples map[string]SampledValue
+
+ // done is closed when this interval has ended, and a new IntervalMetrics
+ // has been created to receive any future metrics.
+ done chan struct{}
+}
+
+// NewIntervalMetrics creates a new IntervalMetrics for a given interval
+func NewIntervalMetrics(intv time.Time) *IntervalMetrics {
+ return &IntervalMetrics{
+ Interval: intv,
+ Gauges: make(map[string]GaugeValue),
+ Points: make(map[string][]float32),
+ Counters: make(map[string]SampledValue),
+ Samples: make(map[string]SampledValue),
+ done: make(chan struct{}),
+ }
+}
+
+// AggregateSample is used to hold aggregate metrics
+// about a sample
+type AggregateSample struct {
+ Count int // The count of emitted pairs
+ Rate float64 // The values rate per time unit (usually 1 second)
+ Sum float64 // The sum of values
+ SumSq float64 `json:"-"` // The sum of squared values
+ Min float64 // Minimum value
+ Max float64 // Maximum value
+ LastUpdated time.Time `json:"-"` // When value was last updated
+}
+
+// Computes a Stddev of the values
+func (a *AggregateSample) Stddev() float64 {
+ num := (float64(a.Count) * a.SumSq) - math.Pow(a.Sum, 2)
+ div := float64(a.Count * (a.Count - 1))
+ if div == 0 {
+ return 0
+ }
+ return math.Sqrt(num / div)
+}
+
+// Computes a mean of the values
+func (a *AggregateSample) Mean() float64 {
+ if a.Count == 0 {
+ return 0
+ }
+ return a.Sum / float64(a.Count)
+}
+
+// Ingest is used to update a sample
+func (a *AggregateSample) Ingest(v float64, rateDenom float64) {
+ a.Count++
+ a.Sum += v
+ a.SumSq += (v * v)
+ if v < a.Min || a.Count == 1 {
+ a.Min = v
+ }
+ if v > a.Max || a.Count == 1 {
+ a.Max = v
+ }
+ a.Rate = float64(a.Sum) / rateDenom
+ a.LastUpdated = time.Now()
+}
+
+func (a *AggregateSample) String() string {
+ if a.Count == 0 {
+ return "Count: 0"
+ } else if a.Stddev() == 0 {
+ return fmt.Sprintf("Count: %d Sum: %0.3f LastUpdated: %s", a.Count, a.Sum, a.LastUpdated)
+ } else {
+ return fmt.Sprintf("Count: %d Min: %0.3f Mean: %0.3f Max: %0.3f Stddev: %0.3f Sum: %0.3f LastUpdated: %s",
+ a.Count, a.Min, a.Mean(), a.Max, a.Stddev(), a.Sum, a.LastUpdated)
+ }
+}
+
+// NewInmemSinkFromURL creates an InmemSink from a URL. It is used
+// (and tested) from NewMetricSinkFromURL.
+func NewInmemSinkFromURL(u *url.URL) (MetricSink, error) {
+ params := u.Query()
+
+ interval, err := time.ParseDuration(params.Get("interval"))
+ if err != nil {
+ return nil, fmt.Errorf("Bad 'interval' param: %s", err)
+ }
+
+ retain, err := time.ParseDuration(params.Get("retain"))
+ if err != nil {
+ return nil, fmt.Errorf("Bad 'retain' param: %s", err)
+ }
+
+ return NewInmemSink(interval, retain), nil
+}
+
+// NewInmemSink is used to construct a new in-memory sink.
+// Uses an aggregation interval and maximum retention period.
+func NewInmemSink(interval, retain time.Duration) *InmemSink {
+ rateTimeUnit := time.Second
+ i := &InmemSink{
+ interval: interval,
+ retain: retain,
+ maxIntervals: int(retain / interval),
+ rateDenom: float64(interval.Nanoseconds()) / float64(rateTimeUnit.Nanoseconds()),
+ }
+ i.intervals = make([]*IntervalMetrics, 0, i.maxIntervals)
+ return i
+}
+
+func (i *InmemSink) SetGauge(key []string, val float32) {
+ i.SetGaugeWithLabels(key, val, nil)
+}
+
+func (i *InmemSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
+ k, name := i.flattenKeyLabels(key, labels)
+ intv := i.getInterval()
+
+ intv.Lock()
+ defer intv.Unlock()
+ intv.Gauges[k] = GaugeValue{Name: name, Value: val, Labels: labels}
+}
+
+func (i *InmemSink) EmitKey(key []string, val float32) {
+ k := i.flattenKey(key)
+ intv := i.getInterval()
+
+ intv.Lock()
+ defer intv.Unlock()
+ vals := intv.Points[k]
+ intv.Points[k] = append(vals, val)
+}
+
+func (i *InmemSink) IncrCounter(key []string, val float32) {
+ i.IncrCounterWithLabels(key, val, nil)
+}
+
+func (i *InmemSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
+ k, name := i.flattenKeyLabels(key, labels)
+ intv := i.getInterval()
+
+ intv.Lock()
+ defer intv.Unlock()
+
+ agg, ok := intv.Counters[k]
+ if !ok {
+ agg = SampledValue{
+ Name: name,
+ AggregateSample: &AggregateSample{},
+ Labels: labels,
+ }
+ intv.Counters[k] = agg
+ }
+ agg.Ingest(float64(val), i.rateDenom)
+}
+
+func (i *InmemSink) AddSample(key []string, val float32) {
+ i.AddSampleWithLabels(key, val, nil)
+}
+
+func (i *InmemSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
+ k, name := i.flattenKeyLabels(key, labels)
+ intv := i.getInterval()
+
+ intv.Lock()
+ defer intv.Unlock()
+
+ agg, ok := intv.Samples[k]
+ if !ok {
+ agg = SampledValue{
+ Name: name,
+ AggregateSample: &AggregateSample{},
+ Labels: labels,
+ }
+ intv.Samples[k] = agg
+ }
+ agg.Ingest(float64(val), i.rateDenom)
+}
+
+// Data is used to retrieve all the aggregated metrics
+// Intervals may be in use, and a read lock should be acquired
+func (i *InmemSink) Data() []*IntervalMetrics {
+ // Get the current interval, forces creation
+ i.getInterval()
+
+ i.intervalLock.RLock()
+ defer i.intervalLock.RUnlock()
+
+ n := len(i.intervals)
+ intervals := make([]*IntervalMetrics, n)
+
+ copy(intervals[:n-1], i.intervals[:n-1])
+ current := i.intervals[n-1]
+
+ // make its own copy for current interval
+ intervals[n-1] = &IntervalMetrics{}
+ copyCurrent := intervals[n-1]
+ current.RLock()
+ *copyCurrent = *current
+ // RWMutex is not safe to copy, so create a new instance on the copy
+ copyCurrent.RWMutex = sync.RWMutex{}
+
+ copyCurrent.Gauges = make(map[string]GaugeValue, len(current.Gauges))
+ for k, v := range current.Gauges {
+ copyCurrent.Gauges[k] = v
+ }
+ // saved values will be not change, just copy its link
+ copyCurrent.Points = make(map[string][]float32, len(current.Points))
+ for k, v := range current.Points {
+ copyCurrent.Points[k] = v
+ }
+ copyCurrent.Counters = make(map[string]SampledValue, len(current.Counters))
+ for k, v := range current.Counters {
+ copyCurrent.Counters[k] = v.deepCopy()
+ }
+ copyCurrent.Samples = make(map[string]SampledValue, len(current.Samples))
+ for k, v := range current.Samples {
+ copyCurrent.Samples[k] = v.deepCopy()
+ }
+ current.RUnlock()
+
+ return intervals
+}
+
+// getInterval returns the current interval. A new interval is created if no
+// previous interval exists, or if the current time is beyond the window for the
+// current interval.
+func (i *InmemSink) getInterval() *IntervalMetrics {
+ intv := time.Now().Truncate(i.interval)
+
+ // Attempt to return the existing interval first, because it only requires
+ // a read lock.
+ i.intervalLock.RLock()
+ n := len(i.intervals)
+ if n > 0 && i.intervals[n-1].Interval == intv {
+ defer i.intervalLock.RUnlock()
+ return i.intervals[n-1]
+ }
+ i.intervalLock.RUnlock()
+
+ i.intervalLock.Lock()
+ defer i.intervalLock.Unlock()
+
+ // Re-check for an existing interval now that the lock is re-acquired.
+ n = len(i.intervals)
+ if n > 0 && i.intervals[n-1].Interval == intv {
+ return i.intervals[n-1]
+ }
+
+ current := NewIntervalMetrics(intv)
+ i.intervals = append(i.intervals, current)
+ if n > 0 {
+ close(i.intervals[n-1].done)
+ }
+
+ n++
+ // Prune old intervals if the count exceeds the max.
+ if n >= i.maxIntervals {
+ copy(i.intervals[0:], i.intervals[n-i.maxIntervals:])
+ i.intervals = i.intervals[:i.maxIntervals]
+ }
+ return current
+}
+
+// Flattens the key for formatting, removes spaces
+func (i *InmemSink) flattenKey(parts []string) string {
+ buf := &bytes.Buffer{}
+
+ joined := strings.Join(parts, ".")
+
+ spaceReplacer.WriteString(buf, joined)
+
+ return buf.String()
+}
+
+// Flattens the key for formatting along with its labels, removes spaces
+func (i *InmemSink) flattenKeyLabels(parts []string, labels []Label) (string, string) {
+ key := i.flattenKey(parts)
+ buf := bytes.NewBufferString(key)
+
+ for _, label := range labels {
+ spaceReplacer.WriteString(buf, fmt.Sprintf(";%s=%s", label.Name, label.Value))
+ }
+
+ return buf.String(), key
+}
diff --git a/vendor/github.com/armon/go-metrics/inmem_endpoint.go b/vendor/github.com/armon/go-metrics/inmem_endpoint.go
new file mode 100644
index 000000000..24eefa963
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/inmem_endpoint.go
@@ -0,0 +1,162 @@
+package metrics
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "sort"
+ "time"
+)
+
+// MetricsSummary holds a roll-up of metrics info for a given interval
+type MetricsSummary struct {
+ Timestamp string
+ Gauges []GaugeValue
+ Points []PointValue
+ Counters []SampledValue
+ Samples []SampledValue
+}
+
+type GaugeValue struct {
+ Name string
+ Hash string `json:"-"`
+ Value float32
+
+ Labels []Label `json:"-"`
+ DisplayLabels map[string]string `json:"Labels"`
+}
+
+type PointValue struct {
+ Name string
+ Points []float32
+}
+
+type SampledValue struct {
+ Name string
+ Hash string `json:"-"`
+ *AggregateSample
+ Mean float64
+ Stddev float64
+
+ Labels []Label `json:"-"`
+ DisplayLabels map[string]string `json:"Labels"`
+}
+
+// deepCopy allocates a new instance of AggregateSample
+func (source *SampledValue) deepCopy() SampledValue {
+ dest := *source
+ if source.AggregateSample != nil {
+ dest.AggregateSample = &AggregateSample{}
+ *dest.AggregateSample = *source.AggregateSample
+ }
+ return dest
+}
+
+// DisplayMetrics returns a summary of the metrics from the most recent finished interval.
+func (i *InmemSink) DisplayMetrics(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
+ data := i.Data()
+
+ var interval *IntervalMetrics
+ n := len(data)
+ switch {
+ case n == 0:
+ return nil, fmt.Errorf("no metric intervals have been initialized yet")
+ case n == 1:
+ // Show the current interval if it's all we have
+ interval = data[0]
+ default:
+ // Show the most recent finished interval if we have one
+ interval = data[n-2]
+ }
+
+ return newMetricSummaryFromInterval(interval), nil
+}
+
+func newMetricSummaryFromInterval(interval *IntervalMetrics) MetricsSummary {
+ interval.RLock()
+ defer interval.RUnlock()
+
+ summary := MetricsSummary{
+ Timestamp: interval.Interval.Round(time.Second).UTC().String(),
+ Gauges: make([]GaugeValue, 0, len(interval.Gauges)),
+ Points: make([]PointValue, 0, len(interval.Points)),
+ }
+
+ // Format and sort the output of each metric type, so it gets displayed in a
+ // deterministic order.
+ for name, points := range interval.Points {
+ summary.Points = append(summary.Points, PointValue{name, points})
+ }
+ sort.Slice(summary.Points, func(i, j int) bool {
+ return summary.Points[i].Name < summary.Points[j].Name
+ })
+
+ for hash, value := range interval.Gauges {
+ value.Hash = hash
+ value.DisplayLabels = make(map[string]string)
+ for _, label := range value.Labels {
+ value.DisplayLabels[label.Name] = label.Value
+ }
+ value.Labels = nil
+
+ summary.Gauges = append(summary.Gauges, value)
+ }
+ sort.Slice(summary.Gauges, func(i, j int) bool {
+ return summary.Gauges[i].Hash < summary.Gauges[j].Hash
+ })
+
+ summary.Counters = formatSamples(interval.Counters)
+ summary.Samples = formatSamples(interval.Samples)
+
+ return summary
+}
+
+func formatSamples(source map[string]SampledValue) []SampledValue {
+ output := make([]SampledValue, 0, len(source))
+ for hash, sample := range source {
+ displayLabels := make(map[string]string)
+ for _, label := range sample.Labels {
+ displayLabels[label.Name] = label.Value
+ }
+
+ output = append(output, SampledValue{
+ Name: sample.Name,
+ Hash: hash,
+ AggregateSample: sample.AggregateSample,
+ Mean: sample.AggregateSample.Mean(),
+ Stddev: sample.AggregateSample.Stddev(),
+ DisplayLabels: displayLabels,
+ })
+ }
+ sort.Slice(output, func(i, j int) bool {
+ return output[i].Hash < output[j].Hash
+ })
+
+ return output
+}
+
+type Encoder interface {
+ Encode(interface{}) error
+}
+
+// Stream writes metrics using encoder.Encode each time an interval ends. Runs
+// until the request context is cancelled, or the encoder returns an error.
+// The caller is responsible for logging any errors from encoder.
+func (i *InmemSink) Stream(ctx context.Context, encoder Encoder) {
+ interval := i.getInterval()
+
+ for {
+ select {
+ case <-interval.done:
+ summary := newMetricSummaryFromInterval(interval)
+ if err := encoder.Encode(summary); err != nil {
+ return
+ }
+
+ // update interval to the next one
+ interval = i.getInterval()
+ case <-ctx.Done():
+ return
+ }
+ }
+}
diff --git a/vendor/github.com/armon/go-metrics/inmem_signal.go b/vendor/github.com/armon/go-metrics/inmem_signal.go
new file mode 100644
index 000000000..0937f4aed
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/inmem_signal.go
@@ -0,0 +1,117 @@
+package metrics
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "os/signal"
+ "strings"
+ "sync"
+ "syscall"
+)
+
+// InmemSignal is used to listen for a given signal, and when received,
+// to dump the current metrics from the InmemSink to an io.Writer
+type InmemSignal struct {
+ signal syscall.Signal
+ inm *InmemSink
+ w io.Writer
+ sigCh chan os.Signal
+
+ stop bool
+ stopCh chan struct{}
+ stopLock sync.Mutex
+}
+
+// NewInmemSignal creates a new InmemSignal which listens for a given signal,
+// and dumps the current metrics out to a writer
+func NewInmemSignal(inmem *InmemSink, sig syscall.Signal, w io.Writer) *InmemSignal {
+ i := &InmemSignal{
+ signal: sig,
+ inm: inmem,
+ w: w,
+ sigCh: make(chan os.Signal, 1),
+ stopCh: make(chan struct{}),
+ }
+ signal.Notify(i.sigCh, sig)
+ go i.run()
+ return i
+}
+
+// DefaultInmemSignal returns a new InmemSignal that responds to SIGUSR1
+// and writes output to stderr. Windows uses SIGBREAK
+func DefaultInmemSignal(inmem *InmemSink) *InmemSignal {
+ return NewInmemSignal(inmem, DefaultSignal, os.Stderr)
+}
+
+// Stop is used to stop the InmemSignal from listening
+func (i *InmemSignal) Stop() {
+ i.stopLock.Lock()
+ defer i.stopLock.Unlock()
+
+ if i.stop {
+ return
+ }
+ i.stop = true
+ close(i.stopCh)
+ signal.Stop(i.sigCh)
+}
+
+// run is a long running routine that handles signals
+func (i *InmemSignal) run() {
+ for {
+ select {
+ case <-i.sigCh:
+ i.dumpStats()
+ case <-i.stopCh:
+ return
+ }
+ }
+}
+
+// dumpStats is used to dump the data to output writer
+func (i *InmemSignal) dumpStats() {
+ buf := bytes.NewBuffer(nil)
+
+ data := i.inm.Data()
+ // Skip the last period which is still being aggregated
+ for j := 0; j < len(data)-1; j++ {
+ intv := data[j]
+ intv.RLock()
+ for _, val := range intv.Gauges {
+ name := i.flattenLabels(val.Name, val.Labels)
+ fmt.Fprintf(buf, "[%v][G] '%s': %0.3f\n", intv.Interval, name, val.Value)
+ }
+ for name, vals := range intv.Points {
+ for _, val := range vals {
+ fmt.Fprintf(buf, "[%v][P] '%s': %0.3f\n", intv.Interval, name, val)
+ }
+ }
+ for _, agg := range intv.Counters {
+ name := i.flattenLabels(agg.Name, agg.Labels)
+ fmt.Fprintf(buf, "[%v][C] '%s': %s\n", intv.Interval, name, agg.AggregateSample)
+ }
+ for _, agg := range intv.Samples {
+ name := i.flattenLabels(agg.Name, agg.Labels)
+ fmt.Fprintf(buf, "[%v][S] '%s': %s\n", intv.Interval, name, agg.AggregateSample)
+ }
+ intv.RUnlock()
+ }
+
+ // Write out the bytes
+ i.w.Write(buf.Bytes())
+}
+
+// Flattens the key for formatting along with its labels, removes spaces
+func (i *InmemSignal) flattenLabels(name string, labels []Label) string {
+ buf := bytes.NewBufferString(name)
+ replacer := strings.NewReplacer(" ", "_", ":", "_")
+
+ for _, label := range labels {
+ replacer.WriteString(buf, ".")
+ replacer.WriteString(buf, label.Value)
+ }
+
+ return buf.String()
+}
diff --git a/vendor/github.com/armon/go-metrics/metrics.go b/vendor/github.com/armon/go-metrics/metrics.go
new file mode 100644
index 000000000..36642a429
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/metrics.go
@@ -0,0 +1,299 @@
+package metrics
+
+import (
+ "runtime"
+ "strings"
+ "time"
+
+ iradix "github.com/hashicorp/go-immutable-radix"
+)
+
+type Label struct {
+ Name string
+ Value string
+}
+
+func (m *Metrics) SetGauge(key []string, val float32) {
+ m.SetGaugeWithLabels(key, val, nil)
+}
+
+func (m *Metrics) SetGaugeWithLabels(key []string, val float32, labels []Label) {
+ if m.HostName != "" {
+ if m.EnableHostnameLabel {
+ labels = append(labels, Label{"host", m.HostName})
+ } else if m.EnableHostname {
+ key = insert(0, m.HostName, key)
+ }
+ }
+ if m.EnableTypePrefix {
+ key = insert(0, "gauge", key)
+ }
+ if m.ServiceName != "" {
+ if m.EnableServiceLabel {
+ labels = append(labels, Label{"service", m.ServiceName})
+ } else {
+ key = insert(0, m.ServiceName, key)
+ }
+ }
+ allowed, labelsFiltered := m.allowMetric(key, labels)
+ if !allowed {
+ return
+ }
+ m.sink.SetGaugeWithLabels(key, val, labelsFiltered)
+}
+
+func (m *Metrics) EmitKey(key []string, val float32) {
+ if m.EnableTypePrefix {
+ key = insert(0, "kv", key)
+ }
+ if m.ServiceName != "" {
+ key = insert(0, m.ServiceName, key)
+ }
+ allowed, _ := m.allowMetric(key, nil)
+ if !allowed {
+ return
+ }
+ m.sink.EmitKey(key, val)
+}
+
+func (m *Metrics) IncrCounter(key []string, val float32) {
+ m.IncrCounterWithLabels(key, val, nil)
+}
+
+func (m *Metrics) IncrCounterWithLabels(key []string, val float32, labels []Label) {
+ if m.HostName != "" && m.EnableHostnameLabel {
+ labels = append(labels, Label{"host", m.HostName})
+ }
+ if m.EnableTypePrefix {
+ key = insert(0, "counter", key)
+ }
+ if m.ServiceName != "" {
+ if m.EnableServiceLabel {
+ labels = append(labels, Label{"service", m.ServiceName})
+ } else {
+ key = insert(0, m.ServiceName, key)
+ }
+ }
+ allowed, labelsFiltered := m.allowMetric(key, labels)
+ if !allowed {
+ return
+ }
+ m.sink.IncrCounterWithLabels(key, val, labelsFiltered)
+}
+
+func (m *Metrics) AddSample(key []string, val float32) {
+ m.AddSampleWithLabels(key, val, nil)
+}
+
+func (m *Metrics) AddSampleWithLabels(key []string, val float32, labels []Label) {
+ if m.HostName != "" && m.EnableHostnameLabel {
+ labels = append(labels, Label{"host", m.HostName})
+ }
+ if m.EnableTypePrefix {
+ key = insert(0, "sample", key)
+ }
+ if m.ServiceName != "" {
+ if m.EnableServiceLabel {
+ labels = append(labels, Label{"service", m.ServiceName})
+ } else {
+ key = insert(0, m.ServiceName, key)
+ }
+ }
+ allowed, labelsFiltered := m.allowMetric(key, labels)
+ if !allowed {
+ return
+ }
+ m.sink.AddSampleWithLabels(key, val, labelsFiltered)
+}
+
+func (m *Metrics) MeasureSince(key []string, start time.Time) {
+ m.MeasureSinceWithLabels(key, start, nil)
+}
+
+func (m *Metrics) MeasureSinceWithLabels(key []string, start time.Time, labels []Label) {
+ if m.HostName != "" && m.EnableHostnameLabel {
+ labels = append(labels, Label{"host", m.HostName})
+ }
+ if m.EnableTypePrefix {
+ key = insert(0, "timer", key)
+ }
+ if m.ServiceName != "" {
+ if m.EnableServiceLabel {
+ labels = append(labels, Label{"service", m.ServiceName})
+ } else {
+ key = insert(0, m.ServiceName, key)
+ }
+ }
+ allowed, labelsFiltered := m.allowMetric(key, labels)
+ if !allowed {
+ return
+ }
+ now := time.Now()
+ elapsed := now.Sub(start)
+ msec := float32(elapsed.Nanoseconds()) / float32(m.TimerGranularity)
+ m.sink.AddSampleWithLabels(key, msec, labelsFiltered)
+}
+
+// UpdateFilter overwrites the existing filter with the given rules.
+func (m *Metrics) UpdateFilter(allow, block []string) {
+ m.UpdateFilterAndLabels(allow, block, m.AllowedLabels, m.BlockedLabels)
+}
+
+// UpdateFilterAndLabels overwrites the existing filter with the given rules.
+func (m *Metrics) UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) {
+ m.filterLock.Lock()
+ defer m.filterLock.Unlock()
+
+ m.AllowedPrefixes = allow
+ m.BlockedPrefixes = block
+
+ if allowedLabels == nil {
+ // Having a white list means we take only elements from it
+ m.allowedLabels = nil
+ } else {
+ m.allowedLabels = make(map[string]bool)
+ for _, v := range allowedLabels {
+ m.allowedLabels[v] = true
+ }
+ }
+ m.blockedLabels = make(map[string]bool)
+ for _, v := range blockedLabels {
+ m.blockedLabels[v] = true
+ }
+ m.AllowedLabels = allowedLabels
+ m.BlockedLabels = blockedLabels
+
+ m.filter = iradix.New()
+ for _, prefix := range m.AllowedPrefixes {
+ m.filter, _, _ = m.filter.Insert([]byte(prefix), true)
+ }
+ for _, prefix := range m.BlockedPrefixes {
+ m.filter, _, _ = m.filter.Insert([]byte(prefix), false)
+ }
+}
+
+func (m *Metrics) Shutdown() {
+ if ss, ok := m.sink.(ShutdownSink); ok {
+ ss.Shutdown()
+ }
+}
+
+// labelIsAllowed return true if a should be included in metric
+// the caller should lock m.filterLock while calling this method
+func (m *Metrics) labelIsAllowed(label *Label) bool {
+ labelName := (*label).Name
+ if m.blockedLabels != nil {
+ _, ok := m.blockedLabels[labelName]
+ if ok {
+ // If present, let's remove this label
+ return false
+ }
+ }
+ if m.allowedLabels != nil {
+ _, ok := m.allowedLabels[labelName]
+ return ok
+ }
+ // Allow by default
+ return true
+}
+
+// filterLabels return only allowed labels
+// the caller should lock m.filterLock while calling this method
+func (m *Metrics) filterLabels(labels []Label) []Label {
+ if labels == nil {
+ return nil
+ }
+ toReturn := []Label{}
+ for _, label := range labels {
+ if m.labelIsAllowed(&label) {
+ toReturn = append(toReturn, label)
+ }
+ }
+ return toReturn
+}
+
+// Returns whether the metric should be allowed based on configured prefix filters
+// Also return the applicable labels
+func (m *Metrics) allowMetric(key []string, labels []Label) (bool, []Label) {
+ m.filterLock.RLock()
+ defer m.filterLock.RUnlock()
+
+ if m.filter == nil || m.filter.Len() == 0 {
+ return m.Config.FilterDefault, m.filterLabels(labels)
+ }
+
+ _, allowed, ok := m.filter.Root().LongestPrefix([]byte(strings.Join(key, ".")))
+ if !ok {
+ return m.Config.FilterDefault, m.filterLabels(labels)
+ }
+
+ return allowed.(bool), m.filterLabels(labels)
+}
+
+// Periodically collects runtime stats to publish
+func (m *Metrics) collectStats() {
+ for {
+ time.Sleep(m.ProfileInterval)
+ m.EmitRuntimeStats()
+ }
+}
+
+// Emits various runtime statsitics
+func (m *Metrics) EmitRuntimeStats() {
+ // Export number of Goroutines
+ numRoutines := runtime.NumGoroutine()
+ m.SetGauge([]string{"runtime", "num_goroutines"}, float32(numRoutines))
+
+ // Export memory stats
+ var stats runtime.MemStats
+ runtime.ReadMemStats(&stats)
+ m.SetGauge([]string{"runtime", "alloc_bytes"}, float32(stats.Alloc))
+ m.SetGauge([]string{"runtime", "sys_bytes"}, float32(stats.Sys))
+ m.SetGauge([]string{"runtime", "malloc_count"}, float32(stats.Mallocs))
+ m.SetGauge([]string{"runtime", "free_count"}, float32(stats.Frees))
+ m.SetGauge([]string{"runtime", "heap_objects"}, float32(stats.HeapObjects))
+ m.SetGauge([]string{"runtime", "total_gc_pause_ns"}, float32(stats.PauseTotalNs))
+ m.SetGauge([]string{"runtime", "total_gc_runs"}, float32(stats.NumGC))
+
+ // Export info about the last few GC runs
+ num := stats.NumGC
+
+ // Handle wrap around
+ if num < m.lastNumGC {
+ m.lastNumGC = 0
+ }
+
+ // Ensure we don't scan more than 256
+ if num-m.lastNumGC >= 256 {
+ m.lastNumGC = num - 255
+ }
+
+ for i := m.lastNumGC; i < num; i++ {
+ pause := stats.PauseNs[i%256]
+ m.AddSample([]string{"runtime", "gc_pause_ns"}, float32(pause))
+ }
+ m.lastNumGC = num
+}
+
+// Creates a new slice with the provided string value as the first element
+// and the provided slice values as the remaining values.
+// Ordering of the values in the provided input slice is kept in tact in the output slice.
+func insert(i int, v string, s []string) []string {
+ // Allocate new slice to avoid modifying the input slice
+ newS := make([]string, len(s)+1)
+
+ // Copy s[0, i-1] into newS
+ for j := 0; j < i; j++ {
+ newS[j] = s[j]
+ }
+
+ // Insert provided element at index i
+ newS[i] = v
+
+ // Copy s[i, len(s)-1] into newS starting at newS[i+1]
+ for j := i; j < len(s); j++ {
+ newS[j+1] = s[j]
+ }
+
+ return newS
+}
diff --git a/vendor/github.com/armon/go-metrics/sink.go b/vendor/github.com/armon/go-metrics/sink.go
new file mode 100644
index 000000000..6f4108ff4
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/sink.go
@@ -0,0 +1,132 @@
+package metrics
+
+import (
+ "fmt"
+ "net/url"
+)
+
+// The MetricSink interface is used to transmit metrics information
+// to an external system
+type MetricSink interface {
+ // A Gauge should retain the last value it is set to
+ SetGauge(key []string, val float32)
+ SetGaugeWithLabels(key []string, val float32, labels []Label)
+
+ // Should emit a Key/Value pair for each call
+ EmitKey(key []string, val float32)
+
+ // Counters should accumulate values
+ IncrCounter(key []string, val float32)
+ IncrCounterWithLabels(key []string, val float32, labels []Label)
+
+ // Samples are for timing information, where quantiles are used
+ AddSample(key []string, val float32)
+ AddSampleWithLabels(key []string, val float32, labels []Label)
+}
+
+type ShutdownSink interface {
+ MetricSink
+
+ // Shutdown the metric sink, flush metrics to storage, and cleanup resources.
+ // Called immediately prior to application exit. Implementations must block
+ // until metrics are flushed to storage.
+ Shutdown()
+}
+
+// BlackholeSink is used to just blackhole messages
+type BlackholeSink struct{}
+
+func (*BlackholeSink) SetGauge(key []string, val float32) {}
+func (*BlackholeSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {}
+func (*BlackholeSink) EmitKey(key []string, val float32) {}
+func (*BlackholeSink) IncrCounter(key []string, val float32) {}
+func (*BlackholeSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {}
+func (*BlackholeSink) AddSample(key []string, val float32) {}
+func (*BlackholeSink) AddSampleWithLabels(key []string, val float32, labels []Label) {}
+
+// FanoutSink is used to sink to fanout values to multiple sinks
+type FanoutSink []MetricSink
+
+func (fh FanoutSink) SetGauge(key []string, val float32) {
+ fh.SetGaugeWithLabels(key, val, nil)
+}
+
+func (fh FanoutSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
+ for _, s := range fh {
+ s.SetGaugeWithLabels(key, val, labels)
+ }
+}
+
+func (fh FanoutSink) EmitKey(key []string, val float32) {
+ for _, s := range fh {
+ s.EmitKey(key, val)
+ }
+}
+
+func (fh FanoutSink) IncrCounter(key []string, val float32) {
+ fh.IncrCounterWithLabels(key, val, nil)
+}
+
+func (fh FanoutSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
+ for _, s := range fh {
+ s.IncrCounterWithLabels(key, val, labels)
+ }
+}
+
+func (fh FanoutSink) AddSample(key []string, val float32) {
+ fh.AddSampleWithLabels(key, val, nil)
+}
+
+func (fh FanoutSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
+ for _, s := range fh {
+ s.AddSampleWithLabels(key, val, labels)
+ }
+}
+
+func (fh FanoutSink) Shutdown() {
+ for _, s := range fh {
+ if ss, ok := s.(ShutdownSink); ok {
+ ss.Shutdown()
+ }
+ }
+}
+
+// sinkURLFactoryFunc is an generic interface around the *SinkFromURL() function provided
+// by each sink type
+type sinkURLFactoryFunc func(*url.URL) (MetricSink, error)
+
+// sinkRegistry supports the generic NewMetricSink function by mapping URL
+// schemes to metric sink factory functions
+var sinkRegistry = map[string]sinkURLFactoryFunc{
+ "statsd": NewStatsdSinkFromURL,
+ "statsite": NewStatsiteSinkFromURL,
+ "inmem": NewInmemSinkFromURL,
+}
+
+// NewMetricSinkFromURL allows a generic URL input to configure any of the
+// supported sinks. The scheme of the URL identifies the type of the sink, the
+// and query parameters are used to set options.
+//
+// "statsd://" - Initializes a StatsdSink. The host and port are passed through
+// as the "addr" of the sink
+//
+// "statsite://" - Initializes a StatsiteSink. The host and port become the
+// "addr" of the sink
+//
+// "inmem://" - Initializes an InmemSink. The host and port are ignored. The
+// "interval" and "duration" query parameters must be specified with valid
+// durations, see NewInmemSink for details.
+func NewMetricSinkFromURL(urlStr string) (MetricSink, error) {
+ u, err := url.Parse(urlStr)
+ if err != nil {
+ return nil, err
+ }
+
+ sinkURLFactoryFunc := sinkRegistry[u.Scheme]
+ if sinkURLFactoryFunc == nil {
+ return nil, fmt.Errorf(
+ "cannot create metric sink, unrecognized sink name: %q", u.Scheme)
+ }
+
+ return sinkURLFactoryFunc(u)
+}
diff --git a/vendor/github.com/armon/go-metrics/start.go b/vendor/github.com/armon/go-metrics/start.go
new file mode 100644
index 000000000..38976f8dc
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/start.go
@@ -0,0 +1,158 @@
+package metrics
+
+import (
+ "os"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ iradix "github.com/hashicorp/go-immutable-radix"
+)
+
+// Config is used to configure metrics settings
+type Config struct {
+ ServiceName string // Prefixed with keys to separate services
+ HostName string // Hostname to use. If not provided and EnableHostname, it will be os.Hostname
+ EnableHostname bool // Enable prefixing gauge values with hostname
+ EnableHostnameLabel bool // Enable adding hostname to labels
+ EnableServiceLabel bool // Enable adding service to labels
+ EnableRuntimeMetrics bool // Enables profiling of runtime metrics (GC, Goroutines, Memory)
+ EnableTypePrefix bool // Prefixes key with a type ("counter", "gauge", "timer")
+ TimerGranularity time.Duration // Granularity of timers.
+ ProfileInterval time.Duration // Interval to profile runtime metrics
+
+ AllowedPrefixes []string // A list of metric prefixes to allow, with '.' as the separator
+ BlockedPrefixes []string // A list of metric prefixes to block, with '.' as the separator
+ AllowedLabels []string // A list of metric labels to allow, with '.' as the separator
+ BlockedLabels []string // A list of metric labels to block, with '.' as the separator
+ FilterDefault bool // Whether to allow metrics by default
+}
+
+// Metrics represents an instance of a metrics sink that can
+// be used to emit
+type Metrics struct {
+ Config
+ lastNumGC uint32
+ sink MetricSink
+ filter *iradix.Tree
+ allowedLabels map[string]bool
+ blockedLabels map[string]bool
+ filterLock sync.RWMutex // Lock filters and allowedLabels/blockedLabels access
+}
+
+// Shared global metrics instance
+var globalMetrics atomic.Value // *Metrics
+
+func init() {
+ // Initialize to a blackhole sink to avoid errors
+ globalMetrics.Store(&Metrics{sink: &BlackholeSink{}})
+}
+
+// Default returns the shared global metrics instance.
+func Default() *Metrics {
+ return globalMetrics.Load().(*Metrics)
+}
+
+// DefaultConfig provides a sane default configuration
+func DefaultConfig(serviceName string) *Config {
+ c := &Config{
+ ServiceName: serviceName, // Use client provided service
+ HostName: "",
+ EnableHostname: true, // Enable hostname prefix
+ EnableRuntimeMetrics: true, // Enable runtime profiling
+ EnableTypePrefix: false, // Disable type prefix
+ TimerGranularity: time.Millisecond, // Timers are in milliseconds
+ ProfileInterval: time.Second, // Poll runtime every second
+ FilterDefault: true, // Don't filter metrics by default
+ }
+
+ // Try to get the hostname
+ name, _ := os.Hostname()
+ c.HostName = name
+ return c
+}
+
+// New is used to create a new instance of Metrics
+func New(conf *Config, sink MetricSink) (*Metrics, error) {
+ met := &Metrics{}
+ met.Config = *conf
+ met.sink = sink
+ met.UpdateFilterAndLabels(conf.AllowedPrefixes, conf.BlockedPrefixes, conf.AllowedLabels, conf.BlockedLabels)
+
+ // Start the runtime collector
+ if conf.EnableRuntimeMetrics {
+ go met.collectStats()
+ }
+ return met, nil
+}
+
+// NewGlobal is the same as New, but it assigns the metrics object to be
+// used globally as well as returning it.
+func NewGlobal(conf *Config, sink MetricSink) (*Metrics, error) {
+ metrics, err := New(conf, sink)
+ if err == nil {
+ globalMetrics.Store(metrics)
+ }
+ return metrics, err
+}
+
+// Proxy all the methods to the globalMetrics instance
+func SetGauge(key []string, val float32) {
+ globalMetrics.Load().(*Metrics).SetGauge(key, val)
+}
+
+func SetGaugeWithLabels(key []string, val float32, labels []Label) {
+ globalMetrics.Load().(*Metrics).SetGaugeWithLabels(key, val, labels)
+}
+
+func EmitKey(key []string, val float32) {
+ globalMetrics.Load().(*Metrics).EmitKey(key, val)
+}
+
+func IncrCounter(key []string, val float32) {
+ globalMetrics.Load().(*Metrics).IncrCounter(key, val)
+}
+
+func IncrCounterWithLabels(key []string, val float32, labels []Label) {
+ globalMetrics.Load().(*Metrics).IncrCounterWithLabels(key, val, labels)
+}
+
+func AddSample(key []string, val float32) {
+ globalMetrics.Load().(*Metrics).AddSample(key, val)
+}
+
+func AddSampleWithLabels(key []string, val float32, labels []Label) {
+ globalMetrics.Load().(*Metrics).AddSampleWithLabels(key, val, labels)
+}
+
+func MeasureSince(key []string, start time.Time) {
+ globalMetrics.Load().(*Metrics).MeasureSince(key, start)
+}
+
+func MeasureSinceWithLabels(key []string, start time.Time, labels []Label) {
+ globalMetrics.Load().(*Metrics).MeasureSinceWithLabels(key, start, labels)
+}
+
+func UpdateFilter(allow, block []string) {
+ globalMetrics.Load().(*Metrics).UpdateFilter(allow, block)
+}
+
+// UpdateFilterAndLabels set allow/block prefixes of metrics while allowedLabels
+// and blockedLabels - when not nil - allow filtering of labels in order to
+// block/allow globally labels (especially useful when having large number of
+// values for a given label). See README.md for more information about usage.
+func UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) {
+ globalMetrics.Load().(*Metrics).UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels)
+}
+
+// Shutdown disables metric collection, then blocks while attempting to flush metrics to storage.
+// WARNING: Not all MetricSink backends support this functionality, and calling this will cause them to leak resources.
+// This is intended for use immediately prior to application exit.
+func Shutdown() {
+ m := globalMetrics.Load().(*Metrics)
+ // Swap whatever MetricSink is currently active with a BlackholeSink. Callers must not have a
+ // reason to expect that calls to the library will successfully collect metrics after Shutdown
+ // has been called.
+ globalMetrics.Store(&Metrics{sink: &BlackholeSink{}})
+ m.Shutdown()
+}
diff --git a/vendor/github.com/armon/go-metrics/statsd.go b/vendor/github.com/armon/go-metrics/statsd.go
new file mode 100644
index 000000000..1bfffce46
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/statsd.go
@@ -0,0 +1,184 @@
+package metrics
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "net"
+ "net/url"
+ "strings"
+ "time"
+)
+
+const (
+ // statsdMaxLen is the maximum size of a packet
+ // to send to statsd
+ statsdMaxLen = 1400
+)
+
+// StatsdSink provides a MetricSink that can be used
+// with a statsite or statsd metrics server. It uses
+// only UDP packets, while StatsiteSink uses TCP.
+type StatsdSink struct {
+ addr string
+ metricQueue chan string
+}
+
+// NewStatsdSinkFromURL creates an StatsdSink from a URL. It is used
+// (and tested) from NewMetricSinkFromURL.
+func NewStatsdSinkFromURL(u *url.URL) (MetricSink, error) {
+ return NewStatsdSink(u.Host)
+}
+
+// NewStatsdSink is used to create a new StatsdSink
+func NewStatsdSink(addr string) (*StatsdSink, error) {
+ s := &StatsdSink{
+ addr: addr,
+ metricQueue: make(chan string, 4096),
+ }
+ go s.flushMetrics()
+ return s, nil
+}
+
+// Close is used to stop flushing to statsd
+func (s *StatsdSink) Shutdown() {
+ close(s.metricQueue)
+}
+
+func (s *StatsdSink) SetGauge(key []string, val float32) {
+ flatKey := s.flattenKey(key)
+ s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
+}
+
+func (s *StatsdSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
+ flatKey := s.flattenKeyLabels(key, labels)
+ s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
+}
+
+func (s *StatsdSink) EmitKey(key []string, val float32) {
+ flatKey := s.flattenKey(key)
+ s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val))
+}
+
+func (s *StatsdSink) IncrCounter(key []string, val float32) {
+ flatKey := s.flattenKey(key)
+ s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
+}
+
+func (s *StatsdSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
+ flatKey := s.flattenKeyLabels(key, labels)
+ s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
+}
+
+func (s *StatsdSink) AddSample(key []string, val float32) {
+ flatKey := s.flattenKey(key)
+ s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
+}
+
+func (s *StatsdSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
+ flatKey := s.flattenKeyLabels(key, labels)
+ s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
+}
+
+// Flattens the key for formatting, removes spaces
+func (s *StatsdSink) flattenKey(parts []string) string {
+ joined := strings.Join(parts, ".")
+ return strings.Map(func(r rune) rune {
+ switch r {
+ case ':':
+ fallthrough
+ case ' ':
+ return '_'
+ default:
+ return r
+ }
+ }, joined)
+}
+
+// Flattens the key along with labels for formatting, removes spaces
+func (s *StatsdSink) flattenKeyLabels(parts []string, labels []Label) string {
+ for _, label := range labels {
+ parts = append(parts, label.Value)
+ }
+ return s.flattenKey(parts)
+}
+
+// Does a non-blocking push to the metrics queue
+func (s *StatsdSink) pushMetric(m string) {
+ select {
+ case s.metricQueue <- m:
+ default:
+ }
+}
+
+// Flushes metrics
+func (s *StatsdSink) flushMetrics() {
+ var sock net.Conn
+ var err error
+ var wait <-chan time.Time
+ ticker := time.NewTicker(flushInterval)
+ defer ticker.Stop()
+
+CONNECT:
+ // Create a buffer
+ buf := bytes.NewBuffer(nil)
+
+ // Attempt to connect
+ sock, err = net.Dial("udp", s.addr)
+ if err != nil {
+ log.Printf("[ERR] Error connecting to statsd! Err: %s", err)
+ goto WAIT
+ }
+
+ for {
+ select {
+ case metric, ok := <-s.metricQueue:
+ // Get a metric from the queue
+ if !ok {
+ goto QUIT
+ }
+
+ // Check if this would overflow the packet size
+ if len(metric)+buf.Len() > statsdMaxLen {
+ _, err := sock.Write(buf.Bytes())
+ buf.Reset()
+ if err != nil {
+ log.Printf("[ERR] Error writing to statsd! Err: %s", err)
+ goto WAIT
+ }
+ }
+
+ // Append to the buffer
+ buf.WriteString(metric)
+
+ case <-ticker.C:
+ if buf.Len() == 0 {
+ continue
+ }
+
+ _, err := sock.Write(buf.Bytes())
+ buf.Reset()
+ if err != nil {
+ log.Printf("[ERR] Error flushing to statsd! Err: %s", err)
+ goto WAIT
+ }
+ }
+ }
+
+WAIT:
+ // Wait for a while
+ wait = time.After(time.Duration(5) * time.Second)
+ for {
+ select {
+ // Dequeue the messages to avoid backlog
+ case _, ok := <-s.metricQueue:
+ if !ok {
+ goto QUIT
+ }
+ case <-wait:
+ goto CONNECT
+ }
+ }
+QUIT:
+ s.metricQueue = nil
+}
diff --git a/vendor/github.com/armon/go-metrics/statsite.go b/vendor/github.com/armon/go-metrics/statsite.go
new file mode 100644
index 000000000..6c0d284d2
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/statsite.go
@@ -0,0 +1,172 @@
+package metrics
+
+import (
+ "bufio"
+ "fmt"
+ "log"
+ "net"
+ "net/url"
+ "strings"
+ "time"
+)
+
+const (
+ // We force flush the statsite metrics after this period of
+ // inactivity. Prevents stats from getting stuck in a buffer
+ // forever.
+ flushInterval = 100 * time.Millisecond
+)
+
+// NewStatsiteSinkFromURL creates an StatsiteSink from a URL. It is used
+// (and tested) from NewMetricSinkFromURL.
+func NewStatsiteSinkFromURL(u *url.URL) (MetricSink, error) {
+ return NewStatsiteSink(u.Host)
+}
+
+// StatsiteSink provides a MetricSink that can be used with a
+// statsite metrics server
+type StatsiteSink struct {
+ addr string
+ metricQueue chan string
+}
+
+// NewStatsiteSink is used to create a new StatsiteSink
+func NewStatsiteSink(addr string) (*StatsiteSink, error) {
+ s := &StatsiteSink{
+ addr: addr,
+ metricQueue: make(chan string, 4096),
+ }
+ go s.flushMetrics()
+ return s, nil
+}
+
+// Close is used to stop flushing to statsite
+func (s *StatsiteSink) Shutdown() {
+ close(s.metricQueue)
+}
+
+func (s *StatsiteSink) SetGauge(key []string, val float32) {
+ flatKey := s.flattenKey(key)
+ s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
+}
+
+func (s *StatsiteSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
+ flatKey := s.flattenKeyLabels(key, labels)
+ s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
+}
+
+func (s *StatsiteSink) EmitKey(key []string, val float32) {
+ flatKey := s.flattenKey(key)
+ s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val))
+}
+
+func (s *StatsiteSink) IncrCounter(key []string, val float32) {
+ flatKey := s.flattenKey(key)
+ s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
+}
+
+func (s *StatsiteSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
+ flatKey := s.flattenKeyLabels(key, labels)
+ s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
+}
+
+func (s *StatsiteSink) AddSample(key []string, val float32) {
+ flatKey := s.flattenKey(key)
+ s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
+}
+
+func (s *StatsiteSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
+ flatKey := s.flattenKeyLabels(key, labels)
+ s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
+}
+
+// Flattens the key for formatting, removes spaces
+func (s *StatsiteSink) flattenKey(parts []string) string {
+ joined := strings.Join(parts, ".")
+ return strings.Map(func(r rune) rune {
+ switch r {
+ case ':':
+ fallthrough
+ case ' ':
+ return '_'
+ default:
+ return r
+ }
+ }, joined)
+}
+
+// Flattens the key along with labels for formatting, removes spaces
+func (s *StatsiteSink) flattenKeyLabels(parts []string, labels []Label) string {
+ for _, label := range labels {
+ parts = append(parts, label.Value)
+ }
+ return s.flattenKey(parts)
+}
+
+// Does a non-blocking push to the metrics queue
+func (s *StatsiteSink) pushMetric(m string) {
+ select {
+ case s.metricQueue <- m:
+ default:
+ }
+}
+
+// Flushes metrics
+func (s *StatsiteSink) flushMetrics() {
+ var sock net.Conn
+ var err error
+ var wait <-chan time.Time
+ var buffered *bufio.Writer
+ ticker := time.NewTicker(flushInterval)
+ defer ticker.Stop()
+
+CONNECT:
+ // Attempt to connect
+ sock, err = net.Dial("tcp", s.addr)
+ if err != nil {
+ log.Printf("[ERR] Error connecting to statsite! Err: %s", err)
+ goto WAIT
+ }
+
+ // Create a buffered writer
+ buffered = bufio.NewWriter(sock)
+
+ for {
+ select {
+ case metric, ok := <-s.metricQueue:
+ // Get a metric from the queue
+ if !ok {
+ goto QUIT
+ }
+
+ // Try to send to statsite
+ _, err := buffered.Write([]byte(metric))
+ if err != nil {
+ log.Printf("[ERR] Error writing to statsite! Err: %s", err)
+ goto WAIT
+ }
+ case <-ticker.C:
+ if err := buffered.Flush(); err != nil {
+ log.Printf("[ERR] Error flushing to statsite! Err: %s", err)
+ goto WAIT
+ }
+ }
+ }
+
+WAIT:
+ // Wait for a while
+ wait = time.After(time.Duration(5) * time.Second)
+ for {
+ select {
+ // Dequeue the messages to avoid backlog
+ case _, ok := <-s.metricQueue:
+ if !ok {
+ goto QUIT
+ }
+ case <-wait:
+ goto CONNECT
+ }
+ }
+QUIT:
+ s.metricQueue = nil
+}
diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE
new file mode 100644
index 000000000..339177be6
--- /dev/null
+++ b/vendor/github.com/beorn7/perks/LICENSE
@@ -0,0 +1,20 @@
+Copyright (C) 2013 Blake Mizerany
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt
new file mode 100644
index 000000000..1602287d7
--- /dev/null
+++ b/vendor/github.com/beorn7/perks/quantile/exampledata.txt
@@ -0,0 +1,2388 @@
+8
+5
+26
+12
+5
+235
+13
+6
+28
+30
+3
+3
+3
+3
+5
+2
+33
+7
+2
+4
+7
+12
+14
+5
+8
+3
+10
+4
+5
+3
+6
+6
+209
+20
+3
+10
+14
+3
+4
+6
+8
+5
+11
+7
+3
+2
+3
+3
+212
+5
+222
+4
+10
+10
+5
+6
+3
+8
+3
+10
+254
+220
+2
+3
+5
+24
+5
+4
+222
+7
+3
+3
+223
+8
+15
+12
+14
+14
+3
+2
+2
+3
+13
+3
+11
+4
+4
+6
+5
+7
+13
+5
+3
+5
+2
+5
+3
+5
+2
+7
+15
+17
+14
+3
+6
+6
+3
+17
+5
+4
+7
+6
+4
+4
+8
+6
+8
+3
+9
+3
+6
+3
+4
+5
+3
+3
+660
+4
+6
+10
+3
+6
+3
+2
+5
+13
+2
+4
+4
+10
+4
+8
+4
+3
+7
+9
+9
+3
+10
+37
+3
+13
+4
+12
+3
+6
+10
+8
+5
+21
+2
+3
+8
+3
+2
+3
+3
+4
+12
+2
+4
+8
+8
+4
+3
+2
+20
+1
+6
+32
+2
+11
+6
+18
+3
+8
+11
+3
+212
+3
+4
+2
+6
+7
+12
+11
+3
+2
+16
+10
+6
+4
+6
+3
+2
+7
+3
+2
+2
+2
+2
+5
+6
+4
+3
+10
+3
+4
+6
+5
+3
+4
+4
+5
+6
+4
+3
+4
+4
+5
+7
+5
+5
+3
+2
+7
+2
+4
+12
+4
+5
+6
+2
+4
+4
+8
+4
+15
+13
+7
+16
+5
+3
+23
+5
+5
+7
+3
+2
+9
+8
+7
+5
+8
+11
+4
+10
+76
+4
+47
+4
+3
+2
+7
+4
+2
+3
+37
+10
+4
+2
+20
+5
+4
+4
+10
+10
+4
+3
+7
+23
+240
+7
+13
+5
+5
+3
+3
+2
+5
+4
+2
+8
+7
+19
+2
+23
+8
+7
+2
+5
+3
+8
+3
+8
+13
+5
+5
+5
+2
+3
+23
+4
+9
+8
+4
+3
+3
+5
+220
+2
+3
+4
+6
+14
+3
+53
+6
+2
+5
+18
+6
+3
+219
+6
+5
+2
+5
+3
+6
+5
+15
+4
+3
+17
+3
+2
+4
+7
+2
+3
+3
+4
+4
+3
+2
+664
+6
+3
+23
+5
+5
+16
+5
+8
+2
+4
+2
+24
+12
+3
+2
+3
+5
+8
+3
+5
+4
+3
+14
+3
+5
+8
+2
+3
+7
+9
+4
+2
+3
+6
+8
+4
+3
+4
+6
+5
+3
+3
+6
+3
+19
+4
+4
+6
+3
+6
+3
+5
+22
+5
+4
+4
+3
+8
+11
+4
+9
+7
+6
+13
+4
+4
+4
+6
+17
+9
+3
+3
+3
+4
+3
+221
+5
+11
+3
+4
+2
+12
+6
+3
+5
+7
+5
+7
+4
+9
+7
+14
+37
+19
+217
+16
+3
+5
+2
+2
+7
+19
+7
+6
+7
+4
+24
+5
+11
+4
+7
+7
+9
+13
+3
+4
+3
+6
+28
+4
+4
+5
+5
+2
+5
+6
+4
+4
+6
+10
+5
+4
+3
+2
+3
+3
+6
+5
+5
+4
+3
+2
+3
+7
+4
+6
+18
+16
+8
+16
+4
+5
+8
+6
+9
+13
+1545
+6
+215
+6
+5
+6
+3
+45
+31
+5
+2
+2
+4
+3
+3
+2
+5
+4
+3
+5
+7
+7
+4
+5
+8
+5
+4
+749
+2
+31
+9
+11
+2
+11
+5
+4
+4
+7
+9
+11
+4
+5
+4
+7
+3
+4
+6
+2
+15
+3
+4
+3
+4
+3
+5
+2
+13
+5
+5
+3
+3
+23
+4
+4
+5
+7
+4
+13
+2
+4
+3
+4
+2
+6
+2
+7
+3
+5
+5
+3
+29
+5
+4
+4
+3
+10
+2
+3
+79
+16
+6
+6
+7
+7
+3
+5
+5
+7
+4
+3
+7
+9
+5
+6
+5
+9
+6
+3
+6
+4
+17
+2
+10
+9
+3
+6
+2
+3
+21
+22
+5
+11
+4
+2
+17
+2
+224
+2
+14
+3
+4
+4
+2
+4
+4
+4
+4
+5
+3
+4
+4
+10
+2
+6
+3
+3
+5
+7
+2
+7
+5
+6
+3
+218
+2
+2
+5
+2
+6
+3
+5
+222
+14
+6
+33
+3
+2
+5
+3
+3
+3
+9
+5
+3
+3
+2
+7
+4
+3
+4
+3
+5
+6
+5
+26
+4
+13
+9
+7
+3
+221
+3
+3
+4
+4
+4
+4
+2
+18
+5
+3
+7
+9
+6
+8
+3
+10
+3
+11
+9
+5
+4
+17
+5
+5
+6
+6
+3
+2
+4
+12
+17
+6
+7
+218
+4
+2
+4
+10
+3
+5
+15
+3
+9
+4
+3
+3
+6
+29
+3
+3
+4
+5
+5
+3
+8
+5
+6
+6
+7
+5
+3
+5
+3
+29
+2
+31
+5
+15
+24
+16
+5
+207
+4
+3
+3
+2
+15
+4
+4
+13
+5
+5
+4
+6
+10
+2
+7
+8
+4
+6
+20
+5
+3
+4
+3
+12
+12
+5
+17
+7
+3
+3
+3
+6
+10
+3
+5
+25
+80
+4
+9
+3
+2
+11
+3
+3
+2
+3
+8
+7
+5
+5
+19
+5
+3
+3
+12
+11
+2
+6
+5
+5
+5
+3
+3
+3
+4
+209
+14
+3
+2
+5
+19
+4
+4
+3
+4
+14
+5
+6
+4
+13
+9
+7
+4
+7
+10
+2
+9
+5
+7
+2
+8
+4
+6
+5
+5
+222
+8
+7
+12
+5
+216
+3
+4
+4
+6
+3
+14
+8
+7
+13
+4
+3
+3
+3
+3
+17
+5
+4
+3
+33
+6
+6
+33
+7
+5
+3
+8
+7
+5
+2
+9
+4
+2
+233
+24
+7
+4
+8
+10
+3
+4
+15
+2
+16
+3
+3
+13
+12
+7
+5
+4
+207
+4
+2
+4
+27
+15
+2
+5
+2
+25
+6
+5
+5
+6
+13
+6
+18
+6
+4
+12
+225
+10
+7
+5
+2
+2
+11
+4
+14
+21
+8
+10
+3
+5
+4
+232
+2
+5
+5
+3
+7
+17
+11
+6
+6
+23
+4
+6
+3
+5
+4
+2
+17
+3
+6
+5
+8
+3
+2
+2
+14
+9
+4
+4
+2
+5
+5
+3
+7
+6
+12
+6
+10
+3
+6
+2
+2
+19
+5
+4
+4
+9
+2
+4
+13
+3
+5
+6
+3
+6
+5
+4
+9
+6
+3
+5
+7
+3
+6
+6
+4
+3
+10
+6
+3
+221
+3
+5
+3
+6
+4
+8
+5
+3
+6
+4
+4
+2
+54
+5
+6
+11
+3
+3
+4
+4
+4
+3
+7
+3
+11
+11
+7
+10
+6
+13
+223
+213
+15
+231
+7
+3
+7
+228
+2
+3
+4
+4
+5
+6
+7
+4
+13
+3
+4
+5
+3
+6
+4
+6
+7
+2
+4
+3
+4
+3
+3
+6
+3
+7
+3
+5
+18
+5
+6
+8
+10
+3
+3
+3
+2
+4
+2
+4
+4
+5
+6
+6
+4
+10
+13
+3
+12
+5
+12
+16
+8
+4
+19
+11
+2
+4
+5
+6
+8
+5
+6
+4
+18
+10
+4
+2
+216
+6
+6
+6
+2
+4
+12
+8
+3
+11
+5
+6
+14
+5
+3
+13
+4
+5
+4
+5
+3
+28
+6
+3
+7
+219
+3
+9
+7
+3
+10
+6
+3
+4
+19
+5
+7
+11
+6
+15
+19
+4
+13
+11
+3
+7
+5
+10
+2
+8
+11
+2
+6
+4
+6
+24
+6
+3
+3
+3
+3
+6
+18
+4
+11
+4
+2
+5
+10
+8
+3
+9
+5
+3
+4
+5
+6
+2
+5
+7
+4
+4
+14
+6
+4
+4
+5
+5
+7
+2
+4
+3
+7
+3
+3
+6
+4
+5
+4
+4
+4
+3
+3
+3
+3
+8
+14
+2
+3
+5
+3
+2
+4
+5
+3
+7
+3
+3
+18
+3
+4
+4
+5
+7
+3
+3
+3
+13
+5
+4
+8
+211
+5
+5
+3
+5
+2
+5
+4
+2
+655
+6
+3
+5
+11
+2
+5
+3
+12
+9
+15
+11
+5
+12
+217
+2
+6
+17
+3
+3
+207
+5
+5
+4
+5
+9
+3
+2
+8
+5
+4
+3
+2
+5
+12
+4
+14
+5
+4
+2
+13
+5
+8
+4
+225
+4
+3
+4
+5
+4
+3
+3
+6
+23
+9
+2
+6
+7
+233
+4
+4
+6
+18
+3
+4
+6
+3
+4
+4
+2
+3
+7
+4
+13
+227
+4
+3
+5
+4
+2
+12
+9
+17
+3
+7
+14
+6
+4
+5
+21
+4
+8
+9
+2
+9
+25
+16
+3
+6
+4
+7
+8
+5
+2
+3
+5
+4
+3
+3
+5
+3
+3
+3
+2
+3
+19
+2
+4
+3
+4
+2
+3
+4
+4
+2
+4
+3
+3
+3
+2
+6
+3
+17
+5
+6
+4
+3
+13
+5
+3
+3
+3
+4
+9
+4
+2
+14
+12
+4
+5
+24
+4
+3
+37
+12
+11
+21
+3
+4
+3
+13
+4
+2
+3
+15
+4
+11
+4
+4
+3
+8
+3
+4
+4
+12
+8
+5
+3
+3
+4
+2
+220
+3
+5
+223
+3
+3
+3
+10
+3
+15
+4
+241
+9
+7
+3
+6
+6
+23
+4
+13
+7
+3
+4
+7
+4
+9
+3
+3
+4
+10
+5
+5
+1
+5
+24
+2
+4
+5
+5
+6
+14
+3
+8
+2
+3
+5
+13
+13
+3
+5
+2
+3
+15
+3
+4
+2
+10
+4
+4
+4
+5
+5
+3
+5
+3
+4
+7
+4
+27
+3
+6
+4
+15
+3
+5
+6
+6
+5
+4
+8
+3
+9
+2
+6
+3
+4
+3
+7
+4
+18
+3
+11
+3
+3
+8
+9
+7
+24
+3
+219
+7
+10
+4
+5
+9
+12
+2
+5
+4
+4
+4
+3
+3
+19
+5
+8
+16
+8
+6
+22
+3
+23
+3
+242
+9
+4
+3
+3
+5
+7
+3
+3
+5
+8
+3
+7
+5
+14
+8
+10
+3
+4
+3
+7
+4
+6
+7
+4
+10
+4
+3
+11
+3
+7
+10
+3
+13
+6
+8
+12
+10
+5
+7
+9
+3
+4
+7
+7
+10
+8
+30
+9
+19
+4
+3
+19
+15
+4
+13
+3
+215
+223
+4
+7
+4
+8
+17
+16
+3
+7
+6
+5
+5
+4
+12
+3
+7
+4
+4
+13
+4
+5
+2
+5
+6
+5
+6
+6
+7
+10
+18
+23
+9
+3
+3
+6
+5
+2
+4
+2
+7
+3
+3
+2
+5
+5
+14
+10
+224
+6
+3
+4
+3
+7
+5
+9
+3
+6
+4
+2
+5
+11
+4
+3
+3
+2
+8
+4
+7
+4
+10
+7
+3
+3
+18
+18
+17
+3
+3
+3
+4
+5
+3
+3
+4
+12
+7
+3
+11
+13
+5
+4
+7
+13
+5
+4
+11
+3
+12
+3
+6
+4
+4
+21
+4
+6
+9
+5
+3
+10
+8
+4
+6
+4
+4
+6
+5
+4
+8
+6
+4
+6
+4
+4
+5
+9
+6
+3
+4
+2
+9
+3
+18
+2
+4
+3
+13
+3
+6
+6
+8
+7
+9
+3
+2
+16
+3
+4
+6
+3
+2
+33
+22
+14
+4
+9
+12
+4
+5
+6
+3
+23
+9
+4
+3
+5
+5
+3
+4
+5
+3
+5
+3
+10
+4
+5
+5
+8
+4
+4
+6
+8
+5
+4
+3
+4
+6
+3
+3
+3
+5
+9
+12
+6
+5
+9
+3
+5
+3
+2
+2
+2
+18
+3
+2
+21
+2
+5
+4
+6
+4
+5
+10
+3
+9
+3
+2
+10
+7
+3
+6
+6
+4
+4
+8
+12
+7
+3
+7
+3
+3
+9
+3
+4
+5
+4
+4
+5
+5
+10
+15
+4
+4
+14
+6
+227
+3
+14
+5
+216
+22
+5
+4
+2
+2
+6
+3
+4
+2
+9
+9
+4
+3
+28
+13
+11
+4
+5
+3
+3
+2
+3
+3
+5
+3
+4
+3
+5
+23
+26
+3
+4
+5
+6
+4
+6
+3
+5
+5
+3
+4
+3
+2
+2
+2
+7
+14
+3
+6
+7
+17
+2
+2
+15
+14
+16
+4
+6
+7
+13
+6
+4
+5
+6
+16
+3
+3
+28
+3
+6
+15
+3
+9
+2
+4
+6
+3
+3
+22
+4
+12
+6
+7
+2
+5
+4
+10
+3
+16
+6
+9
+2
+5
+12
+7
+5
+5
+5
+5
+2
+11
+9
+17
+4
+3
+11
+7
+3
+5
+15
+4
+3
+4
+211
+8
+7
+5
+4
+7
+6
+7
+6
+3
+6
+5
+6
+5
+3
+4
+4
+26
+4
+6
+10
+4
+4
+3
+2
+3
+3
+4
+5
+9
+3
+9
+4
+4
+5
+5
+8
+2
+4
+2
+3
+8
+4
+11
+19
+5
+8
+6
+3
+5
+6
+12
+3
+2
+4
+16
+12
+3
+4
+4
+8
+6
+5
+6
+6
+219
+8
+222
+6
+16
+3
+13
+19
+5
+4
+3
+11
+6
+10
+4
+7
+7
+12
+5
+3
+3
+5
+6
+10
+3
+8
+2
+5
+4
+7
+2
+4
+4
+2
+12
+9
+6
+4
+2
+40
+2
+4
+10
+4
+223
+4
+2
+20
+6
+7
+24
+5
+4
+5
+2
+20
+16
+6
+5
+13
+2
+3
+3
+19
+3
+2
+4
+5
+6
+7
+11
+12
+5
+6
+7
+7
+3
+5
+3
+5
+3
+14
+3
+4
+4
+2
+11
+1
+7
+3
+9
+6
+11
+12
+5
+8
+6
+221
+4
+2
+12
+4
+3
+15
+4
+5
+226
+7
+218
+7
+5
+4
+5
+18
+4
+5
+9
+4
+4
+2
+9
+18
+18
+9
+5
+6
+6
+3
+3
+7
+3
+5
+4
+4
+4
+12
+3
+6
+31
+5
+4
+7
+3
+6
+5
+6
+5
+11
+2
+2
+11
+11
+6
+7
+5
+8
+7
+10
+5
+23
+7
+4
+3
+5
+34
+2
+5
+23
+7
+3
+6
+8
+4
+4
+4
+2
+5
+3
+8
+5
+4
+8
+25
+2
+3
+17
+8
+3
+4
+8
+7
+3
+15
+6
+5
+7
+21
+9
+5
+6
+6
+5
+3
+2
+3
+10
+3
+6
+3
+14
+7
+4
+4
+8
+7
+8
+2
+6
+12
+4
+213
+6
+5
+21
+8
+2
+5
+23
+3
+11
+2
+3
+6
+25
+2
+3
+6
+7
+6
+6
+4
+4
+6
+3
+17
+9
+7
+6
+4
+3
+10
+7
+2
+3
+3
+3
+11
+8
+3
+7
+6
+4
+14
+36
+3
+4
+3
+3
+22
+13
+21
+4
+2
+7
+4
+4
+17
+15
+3
+7
+11
+2
+4
+7
+6
+209
+6
+3
+2
+2
+24
+4
+9
+4
+3
+3
+3
+29
+2
+2
+4
+3
+3
+5
+4
+6
+3
+3
+2
+4
diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go
new file mode 100644
index 000000000..d7d14f8eb
--- /dev/null
+++ b/vendor/github.com/beorn7/perks/quantile/stream.go
@@ -0,0 +1,316 @@
+// Package quantile computes approximate quantiles over an unbounded data
+// stream within low memory and CPU bounds.
+//
+// A small amount of accuracy is traded to achieve the above properties.
+//
+// Multiple streams can be merged before calling Query to generate a single set
+// of results. This is meaningful when the streams represent the same type of
+// data. See Merge and Samples.
+//
+// For more detailed information about the algorithm used, see:
+//
+// Effective Computation of Biased Quantiles over Data Streams
+//
+// http://www.cs.rutgers.edu/~muthu/bquant.pdf
+package quantile
+
+import (
+ "math"
+ "sort"
+)
+
+// Sample holds an observed value and meta information for compression. JSON
+// tags have been added for convenience.
+type Sample struct {
+ Value float64 `json:",string"`
+ Width float64 `json:",string"`
+ Delta float64 `json:",string"`
+}
+
+// Samples represents a slice of samples. It implements sort.Interface.
+type Samples []Sample
+
+func (a Samples) Len() int { return len(a) }
+func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
+func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+type invariant func(s *stream, r float64) float64
+
+// NewLowBiased returns an initialized Stream for low-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the lower ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewLowBiased(epsilon float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ return 2 * epsilon * r
+ }
+ return newStream(ƒ)
+}
+
+// NewHighBiased returns an initialized Stream for high-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the higher ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewHighBiased(epsilon float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ return 2 * epsilon * (s.n - r)
+ }
+ return newStream(ƒ)
+}
+
+// NewTargeted returns an initialized Stream concerned with a particular set of
+// quantile values that are supplied a priori. Knowing these a priori reduces
+// space and computation time. The targets map maps the desired quantiles to
+// their absolute errors, i.e. the true quantile of a value returned by a query
+// is guaranteed to be within (Quantile±Epsilon).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
+func NewTargeted(targetMap map[float64]float64) *Stream {
+ // Convert map to slice to avoid slow iterations on a map.
+ // ƒ is called on the hot path, so converting the map to a slice
+ // beforehand results in significant CPU savings.
+ targets := targetMapToSlice(targetMap)
+
+ ƒ := func(s *stream, r float64) float64 {
+ var m = math.MaxFloat64
+ var f float64
+ for _, t := range targets {
+ if t.quantile*s.n <= r {
+ f = (2 * t.epsilon * r) / t.quantile
+ } else {
+ f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
+ }
+ if f < m {
+ m = f
+ }
+ }
+ return m
+ }
+ return newStream(ƒ)
+}
+
+type target struct {
+ quantile float64
+ epsilon float64
+}
+
+func targetMapToSlice(targetMap map[float64]float64) []target {
+ targets := make([]target, 0, len(targetMap))
+
+ for quantile, epsilon := range targetMap {
+ t := target{
+ quantile: quantile,
+ epsilon: epsilon,
+ }
+ targets = append(targets, t)
+ }
+
+ return targets
+}
+
+// Stream computes quantiles for a stream of float64s. It is not thread-safe by
+// design. Take care when using across multiple goroutines.
+type Stream struct {
+ *stream
+ b Samples
+ sorted bool
+}
+
+func newStream(ƒ invariant) *Stream {
+ x := &stream{ƒ: ƒ}
+ return &Stream{x, make(Samples, 0, 500), true}
+}
+
+// Insert inserts v into the stream.
+func (s *Stream) Insert(v float64) {
+ s.insert(Sample{Value: v, Width: 1})
+}
+
+func (s *Stream) insert(sample Sample) {
+ s.b = append(s.b, sample)
+ s.sorted = false
+ if len(s.b) == cap(s.b) {
+ s.flush()
+ }
+}
+
+// Query returns the computed qth percentiles value. If s was created with
+// NewTargeted, and q is not in the set of quantiles provided a priori, Query
+// will return an unspecified result.
+func (s *Stream) Query(q float64) float64 {
+ if !s.flushed() {
+ // Fast path when there hasn't been enough data for a flush;
+ // this also yields better accuracy for small sets of data.
+ l := len(s.b)
+ if l == 0 {
+ return 0
+ }
+ i := int(math.Ceil(float64(l) * q))
+ if i > 0 {
+ i -= 1
+ }
+ s.maybeSort()
+ return s.b[i].Value
+ }
+ s.flush()
+ return s.stream.query(q)
+}
+
+// Merge merges samples into the underlying streams samples. This is handy when
+// merging multiple streams from separate threads, database shards, etc.
+//
+// ATTENTION: This method is broken and does not yield correct results. The
+// underlying algorithm is not capable of merging streams correctly.
+func (s *Stream) Merge(samples Samples) {
+ sort.Sort(samples)
+ s.stream.merge(samples)
+}
+
+// Reset reinitializes and clears the list reusing the samples buffer memory.
+func (s *Stream) Reset() {
+ s.stream.reset()
+ s.b = s.b[:0]
+}
+
+// Samples returns stream samples held by s.
+func (s *Stream) Samples() Samples {
+ if !s.flushed() {
+ return s.b
+ }
+ s.flush()
+ return s.stream.samples()
+}
+
+// Count returns the total number of samples observed in the stream
+// since initialization.
+func (s *Stream) Count() int {
+ return len(s.b) + s.stream.count()
+}
+
+func (s *Stream) flush() {
+ s.maybeSort()
+ s.stream.merge(s.b)
+ s.b = s.b[:0]
+}
+
+func (s *Stream) maybeSort() {
+ if !s.sorted {
+ s.sorted = true
+ sort.Sort(s.b)
+ }
+}
+
+func (s *Stream) flushed() bool {
+ return len(s.stream.l) > 0
+}
+
+type stream struct {
+ n float64
+ l []Sample
+ ƒ invariant
+}
+
+func (s *stream) reset() {
+ s.l = s.l[:0]
+ s.n = 0
+}
+
+func (s *stream) insert(v float64) {
+ s.merge(Samples{{v, 1, 0}})
+}
+
+func (s *stream) merge(samples Samples) {
+ // TODO(beorn7): This tries to merge not only individual samples, but
+ // whole summaries. The paper doesn't mention merging summaries at
+ // all. Unittests show that the merging is inaccurate. Find out how to
+ // do merges properly.
+ var r float64
+ i := 0
+ for _, sample := range samples {
+ for ; i < len(s.l); i++ {
+ c := s.l[i]
+ if c.Value > sample.Value {
+ // Insert at position i.
+ s.l = append(s.l, Sample{})
+ copy(s.l[i+1:], s.l[i:])
+ s.l[i] = Sample{
+ sample.Value,
+ sample.Width,
+ math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
+ // TODO(beorn7): How to calculate delta correctly?
+ }
+ i++
+ goto inserted
+ }
+ r += c.Width
+ }
+ s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
+ i++
+ inserted:
+ s.n += sample.Width
+ r += sample.Width
+ }
+ s.compress()
+}
+
+func (s *stream) count() int {
+ return int(s.n)
+}
+
+func (s *stream) query(q float64) float64 {
+ t := math.Ceil(q * s.n)
+ t += math.Ceil(s.ƒ(s, t) / 2)
+ p := s.l[0]
+ var r float64
+ for _, c := range s.l[1:] {
+ r += p.Width
+ if r+c.Width+c.Delta > t {
+ return p.Value
+ }
+ p = c
+ }
+ return p.Value
+}
+
+func (s *stream) compress() {
+ if len(s.l) < 2 {
+ return
+ }
+ x := s.l[len(s.l)-1]
+ xi := len(s.l) - 1
+ r := s.n - 1 - x.Width
+
+ for i := len(s.l) - 2; i >= 0; i-- {
+ c := s.l[i]
+ if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
+ x.Width += c.Width
+ s.l[xi] = x
+ // Remove element at i.
+ copy(s.l[i:], s.l[i+1:])
+ s.l = s.l[:len(s.l)-1]
+ xi -= 1
+ } else {
+ x = c
+ xi = i
+ }
+ r -= c.Width
+ }
+}
+
+func (s *stream) samples() Samples {
+ samples := make(Samples, len(s.l))
+ copy(samples, s.l)
+ return samples
+}
diff --git a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt
new file mode 100644
index 000000000..24b53065f
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt
@@ -0,0 +1,22 @@
+Copyright (c) 2016 Caleb Spare
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md
new file mode 100644
index 000000000..8bf0e5b78
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/README.md
@@ -0,0 +1,72 @@
+# xxhash
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2)
+[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml)
+
+xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
+high-quality hashing algorithm that is much faster than anything in the Go
+standard library.
+
+This package provides a straightforward API:
+
+```
+func Sum64(b []byte) uint64
+func Sum64String(s string) uint64
+type Digest struct{ ... }
+ func New() *Digest
+```
+
+The `Digest` type implements hash.Hash64. Its key methods are:
+
+```
+func (*Digest) Write([]byte) (int, error)
+func (*Digest) WriteString(string) (int, error)
+func (*Digest) Sum64() uint64
+```
+
+The package is written with optimized pure Go and also contains even faster
+assembly implementations for amd64 and arm64. If desired, the `purego` build tag
+opts into using the Go code even on those architectures.
+
+[xxHash]: http://cyan4973.github.io/xxHash/
+
+## Compatibility
+
+This package is in a module and the latest code is in version 2 of the module.
+You need a version of Go with at least "minimal module compatibility" to use
+github.com/cespare/xxhash/v2:
+
+* 1.9.7+ for Go 1.9
+* 1.10.3+ for Go 1.10
+* Go 1.11 or later
+
+I recommend using the latest release of Go.
+
+## Benchmarks
+
+Here are some quick benchmarks comparing the pure-Go and assembly
+implementations of Sum64.
+
+| input size | purego | asm |
+| ---------- | --------- | --------- |
+| 4 B | 1.3 GB/s | 1.2 GB/s |
+| 16 B | 2.9 GB/s | 3.5 GB/s |
+| 100 B | 6.9 GB/s | 8.1 GB/s |
+| 4 KB | 11.7 GB/s | 16.7 GB/s |
+| 10 MB | 12.0 GB/s | 17.3 GB/s |
+
+These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
+CPU using the following commands under Go 1.19.2:
+
+```
+benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
+benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
+```
+
+## Projects using this package
+
+- [InfluxDB](https://github.com/influxdata/influxdb)
+- [Prometheus](https://github.com/prometheus/prometheus)
+- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
+- [FreeCache](https://github.com/coocood/freecache)
+- [FastCache](https://github.com/VictoriaMetrics/fastcache)
diff --git a/vendor/github.com/cespare/xxhash/v2/testall.sh b/vendor/github.com/cespare/xxhash/v2/testall.sh
new file mode 100644
index 000000000..94b9c4439
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/testall.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+set -eu -o pipefail
+
+# Small convenience script for running the tests with various combinations of
+# arch/tags. This assumes we're running on amd64 and have qemu available.
+
+go test ./...
+go test -tags purego ./...
+GOARCH=arm64 go test
+GOARCH=arm64 go test -tags purego
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go
new file mode 100644
index 000000000..a9e0d45c9
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go
@@ -0,0 +1,228 @@
+// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
+// at http://cyan4973.github.io/xxHash/.
+package xxhash
+
+import (
+ "encoding/binary"
+ "errors"
+ "math/bits"
+)
+
+const (
+ prime1 uint64 = 11400714785074694791
+ prime2 uint64 = 14029467366897019727
+ prime3 uint64 = 1609587929392839161
+ prime4 uint64 = 9650029242287828579
+ prime5 uint64 = 2870177450012600261
+)
+
+// Store the primes in an array as well.
+//
+// The consts are used when possible in Go code to avoid MOVs but we need a
+// contiguous array of the assembly code.
+var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
+
+// Digest implements hash.Hash64.
+type Digest struct {
+ v1 uint64
+ v2 uint64
+ v3 uint64
+ v4 uint64
+ total uint64
+ mem [32]byte
+ n int // how much of mem is used
+}
+
+// New creates a new Digest that computes the 64-bit xxHash algorithm.
+func New() *Digest {
+ var d Digest
+ d.Reset()
+ return &d
+}
+
+// Reset clears the Digest's state so that it can be reused.
+func (d *Digest) Reset() {
+ d.v1 = primes[0] + prime2
+ d.v2 = prime2
+ d.v3 = 0
+ d.v4 = -primes[0]
+ d.total = 0
+ d.n = 0
+}
+
+// Size always returns 8 bytes.
+func (d *Digest) Size() int { return 8 }
+
+// BlockSize always returns 32 bytes.
+func (d *Digest) BlockSize() int { return 32 }
+
+// Write adds more data to d. It always returns len(b), nil.
+func (d *Digest) Write(b []byte) (n int, err error) {
+ n = len(b)
+ d.total += uint64(n)
+
+ memleft := d.mem[d.n&(len(d.mem)-1):]
+
+ if d.n+n < 32 {
+ // This new data doesn't even fill the current block.
+ copy(memleft, b)
+ d.n += n
+ return
+ }
+
+ if d.n > 0 {
+ // Finish off the partial block.
+ c := copy(memleft, b)
+ d.v1 = round(d.v1, u64(d.mem[0:8]))
+ d.v2 = round(d.v2, u64(d.mem[8:16]))
+ d.v3 = round(d.v3, u64(d.mem[16:24]))
+ d.v4 = round(d.v4, u64(d.mem[24:32]))
+ b = b[c:]
+ d.n = 0
+ }
+
+ if len(b) >= 32 {
+ // One or more full blocks left.
+ nw := writeBlocks(d, b)
+ b = b[nw:]
+ }
+
+ // Store any remaining partial block.
+ copy(d.mem[:], b)
+ d.n = len(b)
+
+ return
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+func (d *Digest) Sum(b []byte) []byte {
+ s := d.Sum64()
+ return append(
+ b,
+ byte(s>>56),
+ byte(s>>48),
+ byte(s>>40),
+ byte(s>>32),
+ byte(s>>24),
+ byte(s>>16),
+ byte(s>>8),
+ byte(s),
+ )
+}
+
+// Sum64 returns the current hash.
+func (d *Digest) Sum64() uint64 {
+ var h uint64
+
+ if d.total >= 32 {
+ v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
+ h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+ h = mergeRound(h, v1)
+ h = mergeRound(h, v2)
+ h = mergeRound(h, v3)
+ h = mergeRound(h, v4)
+ } else {
+ h = d.v3 + prime5
+ }
+
+ h += d.total
+
+ b := d.mem[:d.n&(len(d.mem)-1)]
+ for ; len(b) >= 8; b = b[8:] {
+ k1 := round(0, u64(b[:8]))
+ h ^= k1
+ h = rol27(h)*prime1 + prime4
+ }
+ if len(b) >= 4 {
+ h ^= uint64(u32(b[:4])) * prime1
+ h = rol23(h)*prime2 + prime3
+ b = b[4:]
+ }
+ for ; len(b) > 0; b = b[1:] {
+ h ^= uint64(b[0]) * prime5
+ h = rol11(h) * prime1
+ }
+
+ h ^= h >> 33
+ h *= prime2
+ h ^= h >> 29
+ h *= prime3
+ h ^= h >> 32
+
+ return h
+}
+
+const (
+ magic = "xxh\x06"
+ marshaledSize = len(magic) + 8*5 + 32
+)
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (d *Digest) MarshalBinary() ([]byte, error) {
+ b := make([]byte, 0, marshaledSize)
+ b = append(b, magic...)
+ b = appendUint64(b, d.v1)
+ b = appendUint64(b, d.v2)
+ b = appendUint64(b, d.v3)
+ b = appendUint64(b, d.v4)
+ b = appendUint64(b, d.total)
+ b = append(b, d.mem[:d.n]...)
+ b = b[:len(b)+len(d.mem)-d.n]
+ return b, nil
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+func (d *Digest) UnmarshalBinary(b []byte) error {
+ if len(b) < len(magic) || string(b[:len(magic)]) != magic {
+ return errors.New("xxhash: invalid hash state identifier")
+ }
+ if len(b) != marshaledSize {
+ return errors.New("xxhash: invalid hash state size")
+ }
+ b = b[len(magic):]
+ b, d.v1 = consumeUint64(b)
+ b, d.v2 = consumeUint64(b)
+ b, d.v3 = consumeUint64(b)
+ b, d.v4 = consumeUint64(b)
+ b, d.total = consumeUint64(b)
+ copy(d.mem[:], b)
+ d.n = int(d.total % uint64(len(d.mem)))
+ return nil
+}
+
+func appendUint64(b []byte, x uint64) []byte {
+ var a [8]byte
+ binary.LittleEndian.PutUint64(a[:], x)
+ return append(b, a[:]...)
+}
+
+func consumeUint64(b []byte) ([]byte, uint64) {
+ x := u64(b)
+ return b[8:], x
+}
+
+func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
+func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
+
+func round(acc, input uint64) uint64 {
+ acc += input * prime2
+ acc = rol31(acc)
+ acc *= prime1
+ return acc
+}
+
+func mergeRound(acc, val uint64) uint64 {
+ val = round(0, val)
+ acc ^= val
+ acc = acc*prime1 + prime4
+ return acc
+}
+
+func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) }
+func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) }
+func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
+func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
+func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
+func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
+func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
+func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
new file mode 100644
index 000000000..3e8b13257
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
@@ -0,0 +1,209 @@
+//go:build !appengine && gc && !purego
+// +build !appengine
+// +build gc
+// +build !purego
+
+#include "textflag.h"
+
+// Registers:
+#define h AX
+#define d AX
+#define p SI // pointer to advance through b
+#define n DX
+#define end BX // loop end
+#define v1 R8
+#define v2 R9
+#define v3 R10
+#define v4 R11
+#define x R12
+#define prime1 R13
+#define prime2 R14
+#define prime4 DI
+
+#define round(acc, x) \
+ IMULQ prime2, x \
+ ADDQ x, acc \
+ ROLQ $31, acc \
+ IMULQ prime1, acc
+
+// round0 performs the operation x = round(0, x).
+#define round0(x) \
+ IMULQ prime2, x \
+ ROLQ $31, x \
+ IMULQ prime1, x
+
+// mergeRound applies a merge round on the two registers acc and x.
+// It assumes that prime1, prime2, and prime4 have been loaded.
+#define mergeRound(acc, x) \
+ round0(x) \
+ XORQ x, acc \
+ IMULQ prime1, acc \
+ ADDQ prime4, acc
+
+// blockLoop processes as many 32-byte blocks as possible,
+// updating v1, v2, v3, and v4. It assumes that there is at least one block
+// to process.
+#define blockLoop() \
+loop: \
+ MOVQ +0(p), x \
+ round(v1, x) \
+ MOVQ +8(p), x \
+ round(v2, x) \
+ MOVQ +16(p), x \
+ round(v3, x) \
+ MOVQ +24(p), x \
+ round(v4, x) \
+ ADDQ $32, p \
+ CMPQ p, end \
+ JLE loop
+
+// func Sum64(b []byte) uint64
+TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
+ // Load fixed primes.
+ MOVQ ·primes+0(SB), prime1
+ MOVQ ·primes+8(SB), prime2
+ MOVQ ·primes+24(SB), prime4
+
+ // Load slice.
+ MOVQ b_base+0(FP), p
+ MOVQ b_len+8(FP), n
+ LEAQ (p)(n*1), end
+
+ // The first loop limit will be len(b)-32.
+ SUBQ $32, end
+
+ // Check whether we have at least one block.
+ CMPQ n, $32
+ JLT noBlocks
+
+ // Set up initial state (v1, v2, v3, v4).
+ MOVQ prime1, v1
+ ADDQ prime2, v1
+ MOVQ prime2, v2
+ XORQ v3, v3
+ XORQ v4, v4
+ SUBQ prime1, v4
+
+ blockLoop()
+
+ MOVQ v1, h
+ ROLQ $1, h
+ MOVQ v2, x
+ ROLQ $7, x
+ ADDQ x, h
+ MOVQ v3, x
+ ROLQ $12, x
+ ADDQ x, h
+ MOVQ v4, x
+ ROLQ $18, x
+ ADDQ x, h
+
+ mergeRound(h, v1)
+ mergeRound(h, v2)
+ mergeRound(h, v3)
+ mergeRound(h, v4)
+
+ JMP afterBlocks
+
+noBlocks:
+ MOVQ ·primes+32(SB), h
+
+afterBlocks:
+ ADDQ n, h
+
+ ADDQ $24, end
+ CMPQ p, end
+ JG try4
+
+loop8:
+ MOVQ (p), x
+ ADDQ $8, p
+ round0(x)
+ XORQ x, h
+ ROLQ $27, h
+ IMULQ prime1, h
+ ADDQ prime4, h
+
+ CMPQ p, end
+ JLE loop8
+
+try4:
+ ADDQ $4, end
+ CMPQ p, end
+ JG try1
+
+ MOVL (p), x
+ ADDQ $4, p
+ IMULQ prime1, x
+ XORQ x, h
+
+ ROLQ $23, h
+ IMULQ prime2, h
+ ADDQ ·primes+16(SB), h
+
+try1:
+ ADDQ $4, end
+ CMPQ p, end
+ JGE finalize
+
+loop1:
+ MOVBQZX (p), x
+ ADDQ $1, p
+ IMULQ ·primes+32(SB), x
+ XORQ x, h
+ ROLQ $11, h
+ IMULQ prime1, h
+
+ CMPQ p, end
+ JL loop1
+
+finalize:
+ MOVQ h, x
+ SHRQ $33, x
+ XORQ x, h
+ IMULQ prime2, h
+ MOVQ h, x
+ SHRQ $29, x
+ XORQ x, h
+ IMULQ ·primes+16(SB), h
+ MOVQ h, x
+ SHRQ $32, x
+ XORQ x, h
+
+ MOVQ h, ret+24(FP)
+ RET
+
+// func writeBlocks(d *Digest, b []byte) int
+TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
+ // Load fixed primes needed for round.
+ MOVQ ·primes+0(SB), prime1
+ MOVQ ·primes+8(SB), prime2
+
+ // Load slice.
+ MOVQ b_base+8(FP), p
+ MOVQ b_len+16(FP), n
+ LEAQ (p)(n*1), end
+ SUBQ $32, end
+
+ // Load vN from d.
+ MOVQ s+0(FP), d
+ MOVQ 0(d), v1
+ MOVQ 8(d), v2
+ MOVQ 16(d), v3
+ MOVQ 24(d), v4
+
+ // We don't need to check the loop condition here; this function is
+ // always called with at least one block of data to process.
+ blockLoop()
+
+ // Copy vN back to d.
+ MOVQ v1, 0(d)
+ MOVQ v2, 8(d)
+ MOVQ v3, 16(d)
+ MOVQ v4, 24(d)
+
+ // The number of bytes written is p minus the old base pointer.
+ SUBQ b_base+8(FP), p
+ MOVQ p, ret+32(FP)
+
+ RET
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s
new file mode 100644
index 000000000..7e3145a22
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s
@@ -0,0 +1,183 @@
+//go:build !appengine && gc && !purego
+// +build !appengine
+// +build gc
+// +build !purego
+
+#include "textflag.h"
+
+// Registers:
+#define digest R1
+#define h R2 // return value
+#define p R3 // input pointer
+#define n R4 // input length
+#define nblocks R5 // n / 32
+#define prime1 R7
+#define prime2 R8
+#define prime3 R9
+#define prime4 R10
+#define prime5 R11
+#define v1 R12
+#define v2 R13
+#define v3 R14
+#define v4 R15
+#define x1 R20
+#define x2 R21
+#define x3 R22
+#define x4 R23
+
+#define round(acc, x) \
+ MADD prime2, acc, x, acc \
+ ROR $64-31, acc \
+ MUL prime1, acc
+
+// round0 performs the operation x = round(0, x).
+#define round0(x) \
+ MUL prime2, x \
+ ROR $64-31, x \
+ MUL prime1, x
+
+#define mergeRound(acc, x) \
+ round0(x) \
+ EOR x, acc \
+ MADD acc, prime4, prime1, acc
+
+// blockLoop processes as many 32-byte blocks as possible,
+// updating v1, v2, v3, and v4. It assumes that n >= 32.
+#define blockLoop() \
+ LSR $5, n, nblocks \
+ PCALIGN $16 \
+ loop: \
+ LDP.P 16(p), (x1, x2) \
+ LDP.P 16(p), (x3, x4) \
+ round(v1, x1) \
+ round(v2, x2) \
+ round(v3, x3) \
+ round(v4, x4) \
+ SUB $1, nblocks \
+ CBNZ nblocks, loop
+
+// func Sum64(b []byte) uint64
+TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
+ LDP b_base+0(FP), (p, n)
+
+ LDP ·primes+0(SB), (prime1, prime2)
+ LDP ·primes+16(SB), (prime3, prime4)
+ MOVD ·primes+32(SB), prime5
+
+ CMP $32, n
+ CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
+ BLT afterLoop
+
+ ADD prime1, prime2, v1
+ MOVD prime2, v2
+ MOVD $0, v3
+ NEG prime1, v4
+
+ blockLoop()
+
+ ROR $64-1, v1, x1
+ ROR $64-7, v2, x2
+ ADD x1, x2
+ ROR $64-12, v3, x3
+ ROR $64-18, v4, x4
+ ADD x3, x4
+ ADD x2, x4, h
+
+ mergeRound(h, v1)
+ mergeRound(h, v2)
+ mergeRound(h, v3)
+ mergeRound(h, v4)
+
+afterLoop:
+ ADD n, h
+
+ TBZ $4, n, try8
+ LDP.P 16(p), (x1, x2)
+
+ round0(x1)
+
+ // NOTE: here and below, sequencing the EOR after the ROR (using a
+ // rotated register) is worth a small but measurable speedup for small
+ // inputs.
+ ROR $64-27, h
+ EOR x1 @> 64-27, h, h
+ MADD h, prime4, prime1, h
+
+ round0(x2)
+ ROR $64-27, h
+ EOR x2 @> 64-27, h, h
+ MADD h, prime4, prime1, h
+
+try8:
+ TBZ $3, n, try4
+ MOVD.P 8(p), x1
+
+ round0(x1)
+ ROR $64-27, h
+ EOR x1 @> 64-27, h, h
+ MADD h, prime4, prime1, h
+
+try4:
+ TBZ $2, n, try2
+ MOVWU.P 4(p), x2
+
+ MUL prime1, x2
+ ROR $64-23, h
+ EOR x2 @> 64-23, h, h
+ MADD h, prime3, prime2, h
+
+try2:
+ TBZ $1, n, try1
+ MOVHU.P 2(p), x3
+ AND $255, x3, x1
+ LSR $8, x3, x2
+
+ MUL prime5, x1
+ ROR $64-11, h
+ EOR x1 @> 64-11, h, h
+ MUL prime1, h
+
+ MUL prime5, x2
+ ROR $64-11, h
+ EOR x2 @> 64-11, h, h
+ MUL prime1, h
+
+try1:
+ TBZ $0, n, finalize
+ MOVBU (p), x4
+
+ MUL prime5, x4
+ ROR $64-11, h
+ EOR x4 @> 64-11, h, h
+ MUL prime1, h
+
+finalize:
+ EOR h >> 33, h
+ MUL prime2, h
+ EOR h >> 29, h
+ MUL prime3, h
+ EOR h >> 32, h
+
+ MOVD h, ret+24(FP)
+ RET
+
+// func writeBlocks(d *Digest, b []byte) int
+TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
+ LDP ·primes+0(SB), (prime1, prime2)
+
+ // Load state. Assume v[1-4] are stored contiguously.
+ MOVD d+0(FP), digest
+ LDP 0(digest), (v1, v2)
+ LDP 16(digest), (v3, v4)
+
+ LDP b_base+8(FP), (p, n)
+
+ blockLoop()
+
+ // Store updated state.
+ STP (v1, v2), 0(digest)
+ STP (v3, v4), 16(digest)
+
+ BIC $31, n
+ MOVD n, ret+32(FP)
+ RET
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
new file mode 100644
index 000000000..9216e0a40
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
@@ -0,0 +1,15 @@
+//go:build (amd64 || arm64) && !appengine && gc && !purego
+// +build amd64 arm64
+// +build !appengine
+// +build gc
+// +build !purego
+
+package xxhash
+
+// Sum64 computes the 64-bit xxHash digest of b.
+//
+//go:noescape
+func Sum64(b []byte) uint64
+
+//go:noescape
+func writeBlocks(d *Digest, b []byte) int
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
new file mode 100644
index 000000000..26df13bba
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
@@ -0,0 +1,76 @@
+//go:build (!amd64 && !arm64) || appengine || !gc || purego
+// +build !amd64,!arm64 appengine !gc purego
+
+package xxhash
+
+// Sum64 computes the 64-bit xxHash digest of b.
+func Sum64(b []byte) uint64 {
+ // A simpler version would be
+ // d := New()
+ // d.Write(b)
+ // return d.Sum64()
+ // but this is faster, particularly for small inputs.
+
+ n := len(b)
+ var h uint64
+
+ if n >= 32 {
+ v1 := primes[0] + prime2
+ v2 := prime2
+ v3 := uint64(0)
+ v4 := -primes[0]
+ for len(b) >= 32 {
+ v1 = round(v1, u64(b[0:8:len(b)]))
+ v2 = round(v2, u64(b[8:16:len(b)]))
+ v3 = round(v3, u64(b[16:24:len(b)]))
+ v4 = round(v4, u64(b[24:32:len(b)]))
+ b = b[32:len(b):len(b)]
+ }
+ h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+ h = mergeRound(h, v1)
+ h = mergeRound(h, v2)
+ h = mergeRound(h, v3)
+ h = mergeRound(h, v4)
+ } else {
+ h = prime5
+ }
+
+ h += uint64(n)
+
+ for ; len(b) >= 8; b = b[8:] {
+ k1 := round(0, u64(b[:8]))
+ h ^= k1
+ h = rol27(h)*prime1 + prime4
+ }
+ if len(b) >= 4 {
+ h ^= uint64(u32(b[:4])) * prime1
+ h = rol23(h)*prime2 + prime3
+ b = b[4:]
+ }
+ for ; len(b) > 0; b = b[1:] {
+ h ^= uint64(b[0]) * prime5
+ h = rol11(h) * prime1
+ }
+
+ h ^= h >> 33
+ h *= prime2
+ h ^= h >> 29
+ h *= prime3
+ h ^= h >> 32
+
+ return h
+}
+
+func writeBlocks(d *Digest, b []byte) int {
+ v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
+ n := len(b)
+ for len(b) >= 32 {
+ v1 = round(v1, u64(b[0:8:len(b)]))
+ v2 = round(v2, u64(b[8:16:len(b)]))
+ v3 = round(v3, u64(b[16:24:len(b)]))
+ v4 = round(v4, u64(b[24:32:len(b)]))
+ b = b[32:len(b):len(b)]
+ }
+ d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
+ return n - len(b)
+}
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
new file mode 100644
index 000000000..e86f1b5fd
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
@@ -0,0 +1,16 @@
+//go:build appengine
+// +build appengine
+
+// This file contains the safe implementations of otherwise unsafe-using code.
+
+package xxhash
+
+// Sum64String computes the 64-bit xxHash digest of s.
+func Sum64String(s string) uint64 {
+ return Sum64([]byte(s))
+}
+
+// WriteString adds more data to d. It always returns len(s), nil.
+func (d *Digest) WriteString(s string) (n int, err error) {
+ return d.Write([]byte(s))
+}
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
new file mode 100644
index 000000000..1c1638fd8
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
@@ -0,0 +1,58 @@
+//go:build !appengine
+// +build !appengine
+
+// This file encapsulates usage of unsafe.
+// xxhash_safe.go contains the safe implementations.
+
+package xxhash
+
+import (
+ "unsafe"
+)
+
+// In the future it's possible that compiler optimizations will make these
+// XxxString functions unnecessary by realizing that calls such as
+// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205.
+// If that happens, even if we keep these functions they can be replaced with
+// the trivial safe code.
+
+// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is:
+//
+// var b []byte
+// bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+// bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
+// bh.Len = len(s)
+// bh.Cap = len(s)
+//
+// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough
+// weight to this sequence of expressions that any function that uses it will
+// not be inlined. Instead, the functions below use a different unsafe
+// conversion designed to minimize the inliner weight and allow both to be
+// inlined. There is also a test (TestInlining) which verifies that these are
+// inlined.
+//
+// See https://github.com/golang/go/issues/42739 for discussion.
+
+// Sum64String computes the 64-bit xxHash digest of s.
+// It may be faster than Sum64([]byte(s)) by avoiding a copy.
+func Sum64String(s string) uint64 {
+ b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
+ return Sum64(b)
+}
+
+// WriteString adds more data to d. It always returns len(s), nil.
+// It may be faster than Write([]byte(s)) by avoiding a copy.
+func (d *Digest) WriteString(s string) (n int, err error) {
+ d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
+ // d.Write always returns len(s), nil.
+ // Ignoring the return output and returning these fixed values buys a
+ // savings of 6 in the inliner's cost model.
+ return len(s), nil
+}
+
+// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
+// of the first two words is the same as the layout of a string.
+type sliceHeader struct {
+ s string
+ cap int
+}
diff --git a/vendor/github.com/cilium/cilium/AUTHORS b/vendor/github.com/cilium/cilium/AUTHORS
index 63e7075b9..5815296e7 100644
--- a/vendor/github.com/cilium/cilium/AUTHORS
+++ b/vendor/github.com/cilium/cilium/AUTHORS
@@ -13,7 +13,7 @@ Aditya Purandare aditya.p1993@hotmail.com
Aditya Sharma aditya.sharma@shopify.com
Adrien Trouillaud adrienjt@users.noreply.github.com
Ahmed Bebars 1381372+abebars@users.noreply.github.com
-Akhil Velagapudi 4@4khil.com
+Akhil Velagapudi avelagap@google.com
Akshat Agarwal humancalico@disroot.org
Alan Kutniewski kutniewski@google.com
Alban Crequy alban@kinvolk.io
@@ -28,13 +28,17 @@ Alexey Grevtsev alexey.grevtcev@gmail.com
Alex Katsman alexkats@google.com
Alex Romanov alex@romanov.ws
Alex Szakaly alex.szakaly@gmail.com
+Alex Waring alex.waring@starlingbank.com
Amey Bhide amey@covalent.io
+Amir Kheirkhahan amir.kheirkhahan@dbschenker.com
amitmavgupta 115551423+amitmavgupta@users.noreply.github.com
Amol Ambekar ambekara@google.com
Amre Shakimov amre@covalent.io
Anderson, David L david.l.anderson@intel.com
Andor Nemeth andor_nemeth@swissre.com
+Andreas Mårtensson andreas@addem.se
Andree Klattenhoff mail@andr.ee
+Andrei Kvapil kvapss@gmail.com
André Martins andre@cilium.io
Andrew Bulford andrew.bulford@form3.tech
Andrew Holt andrew.holt@utmost.co
@@ -43,8 +47,11 @@ Andrew Sy Kim kim.andrewsy@gmail.com
Andrey Devyatkin andrey.devyatkin@fivexl.io
Andrey Klimentyev andrey.klimentyev@flant.com
Andrey Voronkov voronkovaa@gmail.com
+Andrii Iuspin yuspin@gmail.com
Andrzej Mamak nqaegg@gmail.com
Andy Allred andy@punasusi.com
+andychuang andy.chuang@shoplineapp.com
+Animesh Pathak 53110238+Sonichigo@users.noreply.github.com
Aniruddha Amit Dutta duttaaniruddha31@gmail.com
Anish Shah anishshah@google.com
Anit Gandhi anitgandhi@gmail.com
@@ -54,6 +61,8 @@ Anthony Rabbito hello@anthonyrabbito.com
Antoine Coetsier acr@exoscale.ch
Antoine Legrand 2t.antoine@gmail.com
Antonio Ojea aojea@google.com
+Anton Ippolitov anton.ippolitov@datadoghq.com
+Antoni Zawodny zawodny@google.com
Anton Protopopov aspsk@isovalent.com
Anton Tykhyy atykhyy@gmail.com
Anurag Aggarwal anurag.aggarwal@flipkart.com
@@ -71,6 +80,7 @@ Assiya Khuzyakhmetova assiya.khuzyakhmetova@nu.edu.kz
Atkins Chang atkinschang@gmail.com
Augustas Berneckas a.berneckas@gmail.com
Austin Cawley-Edwards austin.cawley@gmail.com
+AwesomePatrol AwesomePatrol@users.noreply.github.com
ayesha khaliq ayeshakhaliqrana@gmail.com
Ayush Dwivedi ayush.dwivedi@accuknox.com
Barun Acharya barun1024@gmail.com
@@ -79,6 +89,7 @@ Beatriz Martínez beatriz@isovalent.com
Benjamin Leggett benjamin.leggett@solo.io
Benjamin Pineau benjamin.pineau@datadoghq.com
Benoît Sauvère benoit.sauvere@backmarket.com
+Bernard Halas bernard.halas@berops.com
Bill Mulligan billmulligan516@gmail.com
Bingshen Wang bingshen.wbs@alibaba-inc.com
Bingwu Yang detailyang@gmail.com
@@ -93,6 +104,7 @@ Brandon McNama brandonmcnama@outlook.com
Brian Topping brian@coglative.com
Bruno Miguel Custódio brunomcustodio@gmail.com
Bryan Stenson bryan.stenson@okta.com
+bzsuni bingzhe.sun@daocloud.io
Calum MacRae hi@cmacr.ae
Camilo Schoeningh camilo.schoeningh@dunnhumby.com
Canh Ngo canhnt@gmail.com
@@ -105,7 +117,9 @@ Chance Zibolski chance.zibolski@gmail.com
Changyu Wang changyuwang@tencent.com
Charles-Edouard Brétéché charled.breteche@gmail.com
Charles-Henri Guérin charles-henri.guerin@zenika.com
+chaunceyjiang chaunceyjiang@gmail.com
Chen Kang kongchen28@gmail.com
+chentanjun tanjunchen20@gmail.com
chenyahui chenyahui9@jd.com
Chen Yaqi chenyaqi01@baidu.com
chenyuezhou zcy.chenyue.zhou@gmail.com
@@ -113,6 +127,7 @@ Chris Tarazi chris@isovalent.com
Christian Hörtnagl christian2@univie.ac.at
Christian Hüning christian.huening@finleap.com
Christine Chen christine.chen@datadoghq.com
+Christine Kim xtineskim@gmail.com
Christopher Biscardi chris@christopherbiscardi.com
Christopher Schmidt fakod666@gmail.com
Chris Werner Rau cwrau@cwrau.info
@@ -130,8 +145,10 @@ cui fliter imcusg@gmail.com
Cynthia Thomas cynthia@covalent.io
Cyril Corbon corboncyril@gmail.com
Cyril Scetbon cscetbon@gmail.com
+czybjtu smartczy@outlook.com
Dale Ragan dale.ragan@sap.com
Dalton Hubble dghubble@gmail.com
+Dan Everton deverton@godaddy.com
Daneyon Hansen daneyon.hansen@solo.io
Đặng Minh Dũng dungdm93@live.com
Daniel Borkmann daniel@iogearbox.net
@@ -143,8 +160,7 @@ Daniel T. Lee danieltimlee@gmail.com
Danni Skov Høglund skuffe@pwnz.dk
Dan Sexton dan.b.sexton@gmail.com
Dan Wendlandt dan@covalent.io
-Dario Mader 9934402+darox@users.noreply.github.com
-darox maderdario@gmail.com
+Dario Mader maderdario@gmail.com
Darren Foo darren.foo@shopify.com
Darren Mackintosh unixdaddy@gmail.com
Darshan Chaudhary deathbullet@gmail.com
@@ -155,11 +171,13 @@ David Calvert david@0xdc.me
David Chen davidchen94@outlook.com
David Donchez donch@dailymotion.com
David Korczynski david@adalogics.com
+David Leadbeater dgl@dgl.cx
David Schlosnagle davids@palantir.com
David Wolffberg 1350533+wolffberg@users.noreply.github.com
Dawn lx1960753013@gmail.com
Deepesha Burse deepesha.3007@gmail.com
Deepesh Pathak deepshpathak@gmail.com
+Denis GERMAIN dgermain@deezer.com
Denis Khachyan khachyanda.gmail.com
Derek Gaffney 17263955+gaffneyd4@users.noreply.github.com
Deshi Xiao xiaods@gmail.com
@@ -185,9 +203,10 @@ Dorde Lapcevic dordel@google.com
Duffie Cooley dcooley@isovalent.com
Dylan Reimerink dylan.reimerink@isovalent.com
Ekene Nwobodo nwobodoe71@gmail.com
+Electron alokaks601@gmail.com
El-Fadel Bonfoh elfadel@accuknox.com
Ellie Springsteen ellie.springsteen@appian.com
-Eloy Coto eloy.coto@gmail.com
+Eloy Coto eloy.coto@acalustra.com
Emin Aktas eminaktas34@gmail.com
Emmanuel T Odeke emmanuel@orijtech.com
Emre Savcı emre.savci@trendyol.com
@@ -201,6 +220,7 @@ Eric Ripa eric@ripa.io
Erik Chang erik.chang@nordstrom.com
Eugene Starchenko 17835122+eugenestarchenko@users.noreply.github.com
Ewout Prangsma ewout@prangsma.net
+Fabian Fischer fabian.fischer@isovalent.com
Fabio Falzoi fabio.falzoi@isovalent.com
Faiyaz Ahmed faiyaza@gmail.com
Fankaixi Li fankaixi.li@bytedance.com
@@ -237,6 +257,7 @@ Gray Lian gray.liang@isovalent.com
Guilherme Oki guilherme.oki@wildlifestudios.com
Guilherme Souza 101073+guilhermef@users.noreply.github.com
Gunju Kim gjkim042@gmail.com
+guoguangwu guoguangwu@magic-shield.com
Haitao Li lihaitao@gmail.com
Haiyue Wang haiyue.wang@intel.com
Hang Yan hang.yan@hotmail.com
@@ -250,6 +271,7 @@ Heiko Rothe me@heikorothe.com
Hemanth Malla hemanth.malla@datadoghq.com
Hemslo Wang hemslo.wang@gmail.com
Hrittik hrittikcom@gmail.com
+Huagong Wang wanghuagong@kylinos.cn
huangxuesen huangxuesen@kuaishou.com
Hui Kong hui.kong@qunar.com
Hunter Massey hmassey@tradestation.com
@@ -258,6 +280,8 @@ Ian Vernon ian@cilium.io
Ifeanyi Ubah ify1992@yahoo.com
Ilya Dmitrichenko errordeveloper@gmail.com
Ilya Shaisultanov ilya.shaisultanov@gmail.com
+Ioannis Androulidakis androulidakis.ioannis@gmail.com
+ishuar ishansharma887@gmail.com
Ivan Makarychev i.makarychev@tinkoff.ru
Ivar Lazzaro ivarlazzaro@gmail.com
Jack-R-lantern tjdfkr2421@gmail.com
@@ -268,6 +292,7 @@ James Brookes jbrookes@confluent.io
James Laverack james@isovalent.com
James McShane james.mcshane@superorbital.io
Jan-Erik Rediger janerik@fnordig.de
+Jan Jansen jan.jansen@gdata.de
Jan Mraz strudelpi@pm.me
Jarno Rajahalme jarno@isovalent.com
Jean Raby jean@raby.sh
@@ -314,6 +339,7 @@ Junli Ou oujunli306@gmail.com
Jussi Maki jussi@isovalent.com
kahirokunn okinakahiro@gmail.com
Kaito Ii kaitoii1111@gmail.com
+Kaloyan Yordanov Kaloyan.Yordanov@starlizard.com
Kamil Lach kamil.lach.rs@gmail.com
Karim Naufal rimkashox@gmail.com
Karl Heins karlheins@northwesternmutual.com
@@ -330,12 +356,15 @@ Kir Kolyshkin kolyshkin@gmail.com
Koichiro Den den@klaipeden.com
Konstantin Aksenov konstantin.aksenov@flant.com
Kornilios Kourtis kornilios@isovalent.com
+kwakubiney kebiney@hotmail.com
Laurent Bernaille laurent.bernaille@datadoghq.com
+Lawrence Gadban lawrence.gadban@solo.io
Lehner Florian dev@der-flo.net
Leonard Cohnen lc@edgeless.systems
leonliao xiaobo.liao@gmail.com
Liang Zhou zhoul110@chinatelecom.cn
Li Chengyuan chengyuanli@hotmail.com
+Li Chun lichun823@gmail.com
LiHui andrewli@yunify.com
Lin Dong lindongld@google.com
Lin Sun lin.sun@solo.io
@@ -350,6 +379,7 @@ Lorenz Bauer lmb@isovalent.com
Lorenzo Fundaró lorenzofundaro@gmail.com
Louis DeLosSantos louis@isovalent.com
lou-lan loulan@loulan.me
+Lucas Leblow lucasleblow@mailbox.org
lucming 2876757716@qq.com
Maartje Eyskens maartje.eyskens@isovalent.com
Maciej Fijalkowski maciej.fijalkowski@intel.com
@@ -363,16 +393,19 @@ Maksym Lushpenko iviakciivi@gmail.com
Manali Bhutiyani manali@covalent.io
Mandar U Jog mjog@google.com
Manuel Buil mbuil@suse.com
+Manuel Rüger manuel@rueg.eu
Manuel Stößel manuel.stoessel@t-systems.com
Marcel Zieba marcel.zieba@isovalent.com
Marcin Skarbek git@skarbek.name
Marcin Swiderski forgems@gmail.com
+Marco Aurelio Caldas Miranda 17923899+macmiranda@users.noreply.github.com
Marco Hofstetter marco.hofstetter@isovalent.com
Marco Iorio marco.iorio@isovalent.com
Marco Kilchhofer mkilchhofer@users.noreply.github.com
Marc Stulz m@footek.ch
Marek Chodor mchodor@google.com
Marga Manterola marga@isovalent.com
+Marino Wijay 45947861+distributethe6ix@users.noreply.github.com
Mario Constanti mario@constanti.de
Marius Gerling marius.gerling@uniberg.com
Mark deVilliers markdevilliers@gmail.com
@@ -388,6 +421,7 @@ Matej Gera matejgera@gmail.com
Mathias Herzog mathu@gmx.ch
Mathieu Parent math.parent@gmail.com
Mathieu Tortuyaux mtortuyaux@microsoft.com
+Mathis Joffre 51022808+Joffref@users.noreply.github.com
Matt Anderson matanderson@equinix.com
Matthew Fenwick mfenwick100@gmail.com
Matthew Gumport me@gum.pt
@@ -409,6 +443,7 @@ Michael Francis michael@melenion.com
Michael Kashin mmkashin@gmail.com
Michael Petrov michael@openai.com
Michael Ryan Dempsey bluestealth@bluestealth.pw
+michaelsaah michael.saah@segment.com
Michael Schubert michael@kinvolk.io
Michael Vorburger vorburger@redhat.com
Michal Rostecki vadorovsky@gmail.com
@@ -436,6 +471,7 @@ Neil Wilson neil@aldur.co.uk
Nick M 4718+rkage@users.noreply.github.com
Nick Young nick@isovalent.com
Niclas Mietz solidnerd@users.noreply.github.com
+Nico Berlee nico.berlee@on2it.net
Nicolas Busseneau nicolas@isovalent.com
Nico Vibert nicolas.vibert@isovalent.com
Nikhil Jha nikhiljha@users.noreply.github.com
@@ -447,6 +483,7 @@ Nishant Burte nburte@google.com
Nitish Malhotra nitishm@microsoft.com
Noel Georgi git@frezbo.dev
nrnrk noriki6t@gmail.com
+nuwa nuwa@yannis.codes
Odin Ugedal ougedal@palantir.com
Oilbeater mengxin@alauda.io
Oksana Baranova oksana.baranova@intel.com
@@ -456,14 +493,17 @@ Oliver Ni oliver.ni@gmail.com
Oliver Wang a0924100192@gmail.com
Omar Aloraini ooraini.dev@gmail.com
Ondrej Blazek ondrej.blazek@firma.seznam.cz
+Osthues osthues.matthias@gmail.com
Pablo Ruiz pablo.ruiz@gmail.com
Paco Xu paco.xu@daocloud.io
Parth Patel parth.psu@gmail.com
Patrice Chalin chalin@cncf.io
Patrice Peterson patrice.peterson@mailbox.org
Patrick Mahoney pmahoney@greenkeytech.com
+Patrick Reich patrick@neodyme.io
Pat Riehecky riehecky@fnal.gov
Patrik Cyvoct patrik@ptrk.io
+Paul Bailey spacepants@users.noreply.github.com
Paul Chaignon paul.chaignon@gmail.com
Paulo Gomes pjbgf@linux.com
Pavel Pavlov 40396270+PavelPavlov46@users.noreply.github.com
@@ -494,6 +534,7 @@ Rajat Jindal rajatjindal83@gmail.com
Raphael Campos raphael@accuknox.com
Raphaël Pinson raphael@isovalent.com
Rastislav Szabo rastislav.szabo@isovalent.com
+Rauan Mayemir rauan@mayemir.io
Ray Bejjani ray.bejjani@gmail.com
Raymond de Jong raymond.dejong@isovalent.com
Reilly Brogan reilly@reillybrogan.com
@@ -501,7 +542,11 @@ Rei Shimizu Shikugawa@gmail.com
Rémy Léone rleone@scaleway.com
Renat Tuktarov yandzeek@gmail.com
Rene Luria rene@luria.ch
+René Veenhuis re.veenhuis@gmail.com
Rene Zbinden rene.zbinden@postfinance.ch
+Richard Lavoie richard.lavoie@logmein.com
+Richard Tweed RichardoC@users.noreply.github.com
+Ricky Ho horicky78@gmail.com
Rio Kierkels riokierkels@gmail.com
Robin Gögge r.goegge@isovalent.com
Robin Hahling robin.hahling@gw-computing.net
@@ -529,6 +574,7 @@ Sander Timmerman stimmerman@schubergphilis.com
Sandipan Panda samparksandipan@gmail.com
Sarah Corleissen sarah.corleissen@isovalent.com
Sarvesh Rangnekar sarveshr@google.com
+Satish Matti smatti@google.com
Scott Albertson ascottalbertson@gmail.com
Sean Winn sean@isovalent.com
Sebastian Nickel nick@nine.ch
@@ -539,12 +585,14 @@ Sergey Generalov sergey@isovalent.com
Sergey Monakhov monakhov@puzl.ee
Sergey Shevchenko sergeyshevchdevelop@gmail.com
Sergio Ballesteros snaker@locolandia.net
+sh2 shawnhxh@outlook.com
Shane Utt shaneutt@linux.com
Shantanu Deshpande shantanud106@gmail.com
Shunpoco tkngsnsk313320@gmail.com
Sigurd Spieckermann sigurd.spieckermann@gmail.com
Simone Sciarrati s.sciarrati@gmail.com
Simon Pasquier spasquier@mirantis.com
+sknop 118932232+sknop-cgn@users.noreply.github.com
Smaine Kahlouch smainklh@gmail.com
spacewander spacewanderlzx@gmail.com
Stacy Kim stacy.kim@ucla.edu
@@ -558,6 +606,7 @@ Stevo Slavić sslavic@gmail.com
Stijn Smits stijn@stijn98s.nl
Strukov Anton anstrukov@luxoft.com
Stuart Preston mail@stuartpreston.net
+Su Fei sofat1989@126.com
Sugang Li sugangli@google.com
Sven Haardiek sven.haardiek@uni-muenster.de
Swaminathan Vasudevan svasudevan@suse.com
@@ -576,6 +625,7 @@ Thomas Bachman tbachman@yahoo.com
Thomas Balthazar thomas@balthazar.info
Thomas Gosteli thomas.gosteli@protonmail.com
Thomas Graf thomas@cilium.io
+Thorben von Hacht tvonhacht@apple.com
tigerK yanru.lv@daocloud.io
Tim Horner timothy.horner@isovalent.com
Timo Beckers timo@isovalent.com
@@ -593,11 +643,13 @@ Tony Lambiris tony@criticalstack.com
Tony Lu tonylu@linux.alibaba.com
Tony Norlin tony.norlin@localdomain.se
Tore S. Loenoey tore.lonoy@gmail.com
+toVersus toversus2357@gmail.com
Travis Glenn Hansen travisghansen@yahoo.com
Trevor Roberts Jr Trevor.Roberts.Jr@gmail.com
Trevor Tao trevor.tao@arm.com
Umesh Keerthy B S umesh.freelance@gmail.com
Vadim Ponomarev velizarx@gmail.com
+vakr vakr@microsoft.com
Valas Valancius valas@google.com
Vance Li vanceli@tencent.com
Vigneshwaren Sunder vickymailed@gmail.com
@@ -606,7 +658,7 @@ Viktor Kuzmin kvaster@gmail.com
Viktor Oreshkin imselfish@stek29.rocks
Ville Ojamo bluikko@users.noreply.github.com
Vincent Li vincent.mc.li@gmail.com
-Vipul Singh vipul21sept@gmail.com
+Vipul Singh singhvipul@microsoft.com
Vishal Choudhary sendtovishalchoudhary@gmail.com
Vishnu Soman K vishnusomank05@gmail.com
Vlad Artamonov 742047+vladdy@users.noreply.github.com
@@ -637,6 +689,7 @@ Xin Li xin.li@daocloud.io
Xinyuan Zhang zhangxinyuan@google.com
yanggang gang.yang@daocloud.io
yanhongchang yanhongchang@100tal.com
+Yash Shetty yashshetty@google.com
Ye Sijun junnplus@gmail.com
Yiannis Yiakoumis yiannis@selfienetworks.com
Yongkun Gui ygui@google.com
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/bgp/bgp_client.go b/vendor/github.com/cilium/cilium/api/v1/client/bgp/bgp_client.go
new file mode 100644
index 000000000..2713952d4
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/bgp/bgp_client.go
@@ -0,0 +1,129 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package bgp
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+)
+
+// New creates a new bgp API client.
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
+ return &Client{transport: transport, formats: formats}
+}
+
+/*
+Client for bgp API
+*/
+type Client struct {
+ transport runtime.ClientTransport
+ formats strfmt.Registry
+}
+
+// ClientOption is the option for Client methods
+type ClientOption func(*runtime.ClientOperation)
+
+// ClientService is the interface for Client methods
+type ClientService interface {
+ GetBgpPeers(params *GetBgpPeersParams, opts ...ClientOption) (*GetBgpPeersOK, error)
+
+ GetBgpRoutes(params *GetBgpRoutesParams, opts ...ClientOption) (*GetBgpRoutesOK, error)
+
+ SetTransport(transport runtime.ClientTransport)
+}
+
+/*
+ GetBgpPeers lists operational state of b g p peers
+
+ Retrieves current operational state of BGP peers created by
+
+Cilium BGP virtual router. This includes session state, uptime,
+information per address family, etc.
+*/
+func (a *Client) GetBgpPeers(params *GetBgpPeersParams, opts ...ClientOption) (*GetBgpPeersOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetBgpPeersParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetBgpPeers",
+ Method: "GET",
+ PathPattern: "/bgp/peers",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetBgpPeersReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetBgpPeersOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetBgpPeers: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+GetBgpRoutes lists b g p routes from b g p control plane r i b
+
+Retrieves routes from BGP Control Plane RIB filtered by parameters you specify
+*/
+func (a *Client) GetBgpRoutes(params *GetBgpRoutesParams, opts ...ClientOption) (*GetBgpRoutesOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetBgpRoutesParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetBgpRoutes",
+ Method: "GET",
+ PathPattern: "/bgp/routes",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetBgpRoutesReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetBgpRoutesOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetBgpRoutes: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+ a.transport = transport
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_peers_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_peers_parameters.go
new file mode 100644
index 000000000..9b4fb3d7c
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_peers_parameters.go
@@ -0,0 +1,131 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package bgp
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetBgpPeersParams creates a new GetBgpPeersParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetBgpPeersParams() *GetBgpPeersParams {
+ return &GetBgpPeersParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetBgpPeersParamsWithTimeout creates a new GetBgpPeersParams object
+// with the ability to set a timeout on a request.
+func NewGetBgpPeersParamsWithTimeout(timeout time.Duration) *GetBgpPeersParams {
+ return &GetBgpPeersParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetBgpPeersParamsWithContext creates a new GetBgpPeersParams object
+// with the ability to set a context for a request.
+func NewGetBgpPeersParamsWithContext(ctx context.Context) *GetBgpPeersParams {
+ return &GetBgpPeersParams{
+ Context: ctx,
+ }
+}
+
+// NewGetBgpPeersParamsWithHTTPClient creates a new GetBgpPeersParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetBgpPeersParamsWithHTTPClient(client *http.Client) *GetBgpPeersParams {
+ return &GetBgpPeersParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetBgpPeersParams contains all the parameters to send to the API endpoint
+
+ for the get bgp peers operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetBgpPeersParams struct {
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get bgp peers params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetBgpPeersParams) WithDefaults() *GetBgpPeersParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get bgp peers params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetBgpPeersParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get bgp peers params
+func (o *GetBgpPeersParams) WithTimeout(timeout time.Duration) *GetBgpPeersParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get bgp peers params
+func (o *GetBgpPeersParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get bgp peers params
+func (o *GetBgpPeersParams) WithContext(ctx context.Context) *GetBgpPeersParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get bgp peers params
+func (o *GetBgpPeersParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get bgp peers params
+func (o *GetBgpPeersParams) WithHTTPClient(client *http.Client) *GetBgpPeersParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get bgp peers params
+func (o *GetBgpPeersParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetBgpPeersParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_peers_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_peers_responses.go
new file mode 100644
index 000000000..a1ed8d93c
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_peers_responses.go
@@ -0,0 +1,233 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package bgp
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// GetBgpPeersReader is a Reader for the GetBgpPeers structure.
+type GetBgpPeersReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetBgpPeersReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetBgpPeersOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 500:
+ result := NewGetBgpPeersInternalServerError()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 501:
+ result := NewGetBgpPeersDisabled()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetBgpPeersOK creates a GetBgpPeersOK with default headers values
+func NewGetBgpPeersOK() *GetBgpPeersOK {
+ return &GetBgpPeersOK{}
+}
+
+/*
+GetBgpPeersOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetBgpPeersOK struct {
+ Payload []*models.BgpPeer
+}
+
+// IsSuccess returns true when this get bgp peers o k response has a 2xx status code
+func (o *GetBgpPeersOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get bgp peers o k response has a 3xx status code
+func (o *GetBgpPeersOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get bgp peers o k response has a 4xx status code
+func (o *GetBgpPeersOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get bgp peers o k response has a 5xx status code
+func (o *GetBgpPeersOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get bgp peers o k response a status code equal to that given
+func (o *GetBgpPeersOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetBgpPeersOK) Error() string {
+ return fmt.Sprintf("[GET /bgp/peers][%d] getBgpPeersOK %+v", 200, o.Payload)
+}
+
+func (o *GetBgpPeersOK) String() string {
+ return fmt.Sprintf("[GET /bgp/peers][%d] getBgpPeersOK %+v", 200, o.Payload)
+}
+
+func (o *GetBgpPeersOK) GetPayload() []*models.BgpPeer {
+ return o.Payload
+}
+
+func (o *GetBgpPeersOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetBgpPeersInternalServerError creates a GetBgpPeersInternalServerError with default headers values
+func NewGetBgpPeersInternalServerError() *GetBgpPeersInternalServerError {
+ return &GetBgpPeersInternalServerError{}
+}
+
+/*
+GetBgpPeersInternalServerError describes a response with status code 500, with default header values.
+
+Internal Server Error
+*/
+type GetBgpPeersInternalServerError struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this get bgp peers internal server error response has a 2xx status code
+func (o *GetBgpPeersInternalServerError) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get bgp peers internal server error response has a 3xx status code
+func (o *GetBgpPeersInternalServerError) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get bgp peers internal server error response has a 4xx status code
+func (o *GetBgpPeersInternalServerError) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get bgp peers internal server error response has a 5xx status code
+func (o *GetBgpPeersInternalServerError) IsServerError() bool {
+ return true
+}
+
+// IsCode returns true when this get bgp peers internal server error response a status code equal to that given
+func (o *GetBgpPeersInternalServerError) IsCode(code int) bool {
+ return code == 500
+}
+
+func (o *GetBgpPeersInternalServerError) Error() string {
+ return fmt.Sprintf("[GET /bgp/peers][%d] getBgpPeersInternalServerError %+v", 500, o.Payload)
+}
+
+func (o *GetBgpPeersInternalServerError) String() string {
+ return fmt.Sprintf("[GET /bgp/peers][%d] getBgpPeersInternalServerError %+v", 500, o.Payload)
+}
+
+func (o *GetBgpPeersInternalServerError) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *GetBgpPeersInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetBgpPeersDisabled creates a GetBgpPeersDisabled with default headers values
+func NewGetBgpPeersDisabled() *GetBgpPeersDisabled {
+ return &GetBgpPeersDisabled{}
+}
+
+/*
+GetBgpPeersDisabled describes a response with status code 501, with default header values.
+
+BGP Control Plane disabled
+*/
+type GetBgpPeersDisabled struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this get bgp peers disabled response has a 2xx status code
+func (o *GetBgpPeersDisabled) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get bgp peers disabled response has a 3xx status code
+func (o *GetBgpPeersDisabled) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get bgp peers disabled response has a 4xx status code
+func (o *GetBgpPeersDisabled) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get bgp peers disabled response has a 5xx status code
+func (o *GetBgpPeersDisabled) IsServerError() bool {
+ return true
+}
+
+// IsCode returns true when this get bgp peers disabled response a status code equal to that given
+func (o *GetBgpPeersDisabled) IsCode(code int) bool {
+ return code == 501
+}
+
+func (o *GetBgpPeersDisabled) Error() string {
+ return fmt.Sprintf("[GET /bgp/peers][%d] getBgpPeersDisabled %+v", 501, o.Payload)
+}
+
+func (o *GetBgpPeersDisabled) String() string {
+ return fmt.Sprintf("[GET /bgp/peers][%d] getBgpPeersDisabled %+v", 501, o.Payload)
+}
+
+func (o *GetBgpPeersDisabled) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *GetBgpPeersDisabled) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_routes_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_routes_parameters.go
new file mode 100644
index 000000000..9fa279b03
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_routes_parameters.go
@@ -0,0 +1,286 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package bgp
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// NewGetBgpRoutesParams creates a new GetBgpRoutesParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetBgpRoutesParams() *GetBgpRoutesParams {
+ return &GetBgpRoutesParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetBgpRoutesParamsWithTimeout creates a new GetBgpRoutesParams object
+// with the ability to set a timeout on a request.
+func NewGetBgpRoutesParamsWithTimeout(timeout time.Duration) *GetBgpRoutesParams {
+ return &GetBgpRoutesParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetBgpRoutesParamsWithContext creates a new GetBgpRoutesParams object
+// with the ability to set a context for a request.
+func NewGetBgpRoutesParamsWithContext(ctx context.Context) *GetBgpRoutesParams {
+ return &GetBgpRoutesParams{
+ Context: ctx,
+ }
+}
+
+// NewGetBgpRoutesParamsWithHTTPClient creates a new GetBgpRoutesParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetBgpRoutesParamsWithHTTPClient(client *http.Client) *GetBgpRoutesParams {
+ return &GetBgpRoutesParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetBgpRoutesParams contains all the parameters to send to the API endpoint
+
+ for the get bgp routes operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetBgpRoutesParams struct {
+
+ /* Afi.
+
+ Address Family Indicator (AFI) of a BGP route
+ */
+ Afi string
+
+ /* Neighbor.
+
+ IP address specifying a BGP neighbor.
+ Has to be specified only when table type is adj-rib-in or adj-rib-out.
+
+ */
+ Neighbor *string
+
+ /* RouterAsn.
+
+ Autonomous System Number (ASN) identifying a BGP virtual router instance.
+ If not specified, all virtual router instances are selected.
+
+ */
+ RouterAsn *int64
+
+ /* Safi.
+
+ Subsequent Address Family Indicator (SAFI) of a BGP route
+ */
+ Safi string
+
+ /* TableType.
+
+ BGP Routing Information Base (RIB) table type
+ */
+ TableType string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get bgp routes params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetBgpRoutesParams) WithDefaults() *GetBgpRoutesParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get bgp routes params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetBgpRoutesParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get bgp routes params
+func (o *GetBgpRoutesParams) WithTimeout(timeout time.Duration) *GetBgpRoutesParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get bgp routes params
+func (o *GetBgpRoutesParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get bgp routes params
+func (o *GetBgpRoutesParams) WithContext(ctx context.Context) *GetBgpRoutesParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get bgp routes params
+func (o *GetBgpRoutesParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get bgp routes params
+func (o *GetBgpRoutesParams) WithHTTPClient(client *http.Client) *GetBgpRoutesParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get bgp routes params
+func (o *GetBgpRoutesParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithAfi adds the afi to the get bgp routes params
+func (o *GetBgpRoutesParams) WithAfi(afi string) *GetBgpRoutesParams {
+ o.SetAfi(afi)
+ return o
+}
+
+// SetAfi adds the afi to the get bgp routes params
+func (o *GetBgpRoutesParams) SetAfi(afi string) {
+ o.Afi = afi
+}
+
+// WithNeighbor adds the neighbor to the get bgp routes params
+func (o *GetBgpRoutesParams) WithNeighbor(neighbor *string) *GetBgpRoutesParams {
+ o.SetNeighbor(neighbor)
+ return o
+}
+
+// SetNeighbor adds the neighbor to the get bgp routes params
+func (o *GetBgpRoutesParams) SetNeighbor(neighbor *string) {
+ o.Neighbor = neighbor
+}
+
+// WithRouterAsn adds the routerAsn to the get bgp routes params
+func (o *GetBgpRoutesParams) WithRouterAsn(routerAsn *int64) *GetBgpRoutesParams {
+ o.SetRouterAsn(routerAsn)
+ return o
+}
+
+// SetRouterAsn adds the routerAsn to the get bgp routes params
+func (o *GetBgpRoutesParams) SetRouterAsn(routerAsn *int64) {
+ o.RouterAsn = routerAsn
+}
+
+// WithSafi adds the safi to the get bgp routes params
+func (o *GetBgpRoutesParams) WithSafi(safi string) *GetBgpRoutesParams {
+ o.SetSafi(safi)
+ return o
+}
+
+// SetSafi adds the safi to the get bgp routes params
+func (o *GetBgpRoutesParams) SetSafi(safi string) {
+ o.Safi = safi
+}
+
+// WithTableType adds the tableType to the get bgp routes params
+func (o *GetBgpRoutesParams) WithTableType(tableType string) *GetBgpRoutesParams {
+ o.SetTableType(tableType)
+ return o
+}
+
+// SetTableType adds the tableType to the get bgp routes params
+func (o *GetBgpRoutesParams) SetTableType(tableType string) {
+ o.TableType = tableType
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetBgpRoutesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // query param afi
+ qrAfi := o.Afi
+ qAfi := qrAfi
+ if qAfi != "" {
+
+ if err := r.SetQueryParam("afi", qAfi); err != nil {
+ return err
+ }
+ }
+
+ if o.Neighbor != nil {
+
+ // query param neighbor
+ var qrNeighbor string
+
+ if o.Neighbor != nil {
+ qrNeighbor = *o.Neighbor
+ }
+ qNeighbor := qrNeighbor
+ if qNeighbor != "" {
+
+ if err := r.SetQueryParam("neighbor", qNeighbor); err != nil {
+ return err
+ }
+ }
+ }
+
+ if o.RouterAsn != nil {
+
+ // query param router_asn
+ var qrRouterAsn int64
+
+ if o.RouterAsn != nil {
+ qrRouterAsn = *o.RouterAsn
+ }
+ qRouterAsn := swag.FormatInt64(qrRouterAsn)
+ if qRouterAsn != "" {
+
+ if err := r.SetQueryParam("router_asn", qRouterAsn); err != nil {
+ return err
+ }
+ }
+ }
+
+ // query param safi
+ qrSafi := o.Safi
+ qSafi := qrSafi
+ if qSafi != "" {
+
+ if err := r.SetQueryParam("safi", qSafi); err != nil {
+ return err
+ }
+ }
+
+ // query param table_type
+ qrTableType := o.TableType
+ qTableType := qrTableType
+ if qTableType != "" {
+
+ if err := r.SetQueryParam("table_type", qTableType); err != nil {
+ return err
+ }
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_routes_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_routes_responses.go
new file mode 100644
index 000000000..f7211a467
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_routes_responses.go
@@ -0,0 +1,233 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package bgp
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// GetBgpRoutesReader is a Reader for the GetBgpRoutes structure.
+type GetBgpRoutesReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetBgpRoutesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetBgpRoutesOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 500:
+ result := NewGetBgpRoutesInternalServerError()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 501:
+ result := NewGetBgpRoutesDisabled()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetBgpRoutesOK creates a GetBgpRoutesOK with default headers values
+func NewGetBgpRoutesOK() *GetBgpRoutesOK {
+ return &GetBgpRoutesOK{}
+}
+
+/*
+GetBgpRoutesOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetBgpRoutesOK struct {
+ Payload []*models.BgpRoute
+}
+
+// IsSuccess returns true when this get bgp routes o k response has a 2xx status code
+func (o *GetBgpRoutesOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get bgp routes o k response has a 3xx status code
+func (o *GetBgpRoutesOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get bgp routes o k response has a 4xx status code
+func (o *GetBgpRoutesOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get bgp routes o k response has a 5xx status code
+func (o *GetBgpRoutesOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get bgp routes o k response a status code equal to that given
+func (o *GetBgpRoutesOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetBgpRoutesOK) Error() string {
+ return fmt.Sprintf("[GET /bgp/routes][%d] getBgpRoutesOK %+v", 200, o.Payload)
+}
+
+func (o *GetBgpRoutesOK) String() string {
+ return fmt.Sprintf("[GET /bgp/routes][%d] getBgpRoutesOK %+v", 200, o.Payload)
+}
+
+func (o *GetBgpRoutesOK) GetPayload() []*models.BgpRoute {
+ return o.Payload
+}
+
+func (o *GetBgpRoutesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetBgpRoutesInternalServerError creates a GetBgpRoutesInternalServerError with default headers values
+func NewGetBgpRoutesInternalServerError() *GetBgpRoutesInternalServerError {
+ return &GetBgpRoutesInternalServerError{}
+}
+
+/*
+GetBgpRoutesInternalServerError describes a response with status code 500, with default header values.
+
+Internal Server Error
+*/
+type GetBgpRoutesInternalServerError struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this get bgp routes internal server error response has a 2xx status code
+func (o *GetBgpRoutesInternalServerError) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get bgp routes internal server error response has a 3xx status code
+func (o *GetBgpRoutesInternalServerError) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get bgp routes internal server error response has a 4xx status code
+func (o *GetBgpRoutesInternalServerError) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get bgp routes internal server error response has a 5xx status code
+func (o *GetBgpRoutesInternalServerError) IsServerError() bool {
+ return true
+}
+
+// IsCode returns true when this get bgp routes internal server error response a status code equal to that given
+func (o *GetBgpRoutesInternalServerError) IsCode(code int) bool {
+ return code == 500
+}
+
+func (o *GetBgpRoutesInternalServerError) Error() string {
+ return fmt.Sprintf("[GET /bgp/routes][%d] getBgpRoutesInternalServerError %+v", 500, o.Payload)
+}
+
+func (o *GetBgpRoutesInternalServerError) String() string {
+ return fmt.Sprintf("[GET /bgp/routes][%d] getBgpRoutesInternalServerError %+v", 500, o.Payload)
+}
+
+func (o *GetBgpRoutesInternalServerError) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *GetBgpRoutesInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetBgpRoutesDisabled creates a GetBgpRoutesDisabled with default headers values
+func NewGetBgpRoutesDisabled() *GetBgpRoutesDisabled {
+ return &GetBgpRoutesDisabled{}
+}
+
+/*
+GetBgpRoutesDisabled describes a response with status code 501, with default header values.
+
+BGP Control Plane disabled
+*/
+type GetBgpRoutesDisabled struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this get bgp routes disabled response has a 2xx status code
+func (o *GetBgpRoutesDisabled) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get bgp routes disabled response has a 3xx status code
+func (o *GetBgpRoutesDisabled) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get bgp routes disabled response has a 4xx status code
+func (o *GetBgpRoutesDisabled) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get bgp routes disabled response has a 5xx status code
+func (o *GetBgpRoutesDisabled) IsServerError() bool {
+ return true
+}
+
+// IsCode returns true when this get bgp routes disabled response a status code equal to that given
+func (o *GetBgpRoutesDisabled) IsCode(code int) bool {
+ return code == 501
+}
+
+func (o *GetBgpRoutesDisabled) Error() string {
+ return fmt.Sprintf("[GET /bgp/routes][%d] getBgpRoutesDisabled %+v", 501, o.Payload)
+}
+
+func (o *GetBgpRoutesDisabled) String() string {
+ return fmt.Sprintf("[GET /bgp/routes][%d] getBgpRoutesDisabled %+v", 501, o.Payload)
+}
+
+func (o *GetBgpRoutesDisabled) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *GetBgpRoutesDisabled) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/cilium_api_client.go b/vendor/github.com/cilium/cilium/api/v1/client/cilium_api_client.go
new file mode 100644
index 000000000..109454afa
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/cilium_api_client.go
@@ -0,0 +1,160 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package client
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/client/bgp"
+ "github.com/cilium/cilium/api/v1/client/daemon"
+ "github.com/cilium/cilium/api/v1/client/endpoint"
+ "github.com/cilium/cilium/api/v1/client/ipam"
+ "github.com/cilium/cilium/api/v1/client/metrics"
+ "github.com/cilium/cilium/api/v1/client/policy"
+ "github.com/cilium/cilium/api/v1/client/prefilter"
+ "github.com/cilium/cilium/api/v1/client/recorder"
+ "github.com/cilium/cilium/api/v1/client/service"
+ "github.com/cilium/cilium/api/v1/client/statedb"
+)
+
+// Default cilium API HTTP client.
+var Default = NewHTTPClient(nil)
+
+const (
+ // DefaultHost is the default Host
+ // found in Meta (info) section of spec file
+ DefaultHost string = "localhost"
+ // DefaultBasePath is the default BasePath
+ // found in Meta (info) section of spec file
+ DefaultBasePath string = "/v1"
+)
+
+// DefaultSchemes are the default schemes found in Meta (info) section of spec file
+var DefaultSchemes = []string{"http"}
+
+// NewHTTPClient creates a new cilium API HTTP client.
+func NewHTTPClient(formats strfmt.Registry) *CiliumAPI {
+ return NewHTTPClientWithConfig(formats, nil)
+}
+
+// NewHTTPClientWithConfig creates a new cilium API HTTP client,
+// using a customizable transport config.
+func NewHTTPClientWithConfig(formats strfmt.Registry, cfg *TransportConfig) *CiliumAPI {
+ // ensure nullable parameters have default
+ if cfg == nil {
+ cfg = DefaultTransportConfig()
+ }
+
+ // create transport and client
+ transport := httptransport.New(cfg.Host, cfg.BasePath, cfg.Schemes)
+ return New(transport, formats)
+}
+
+// New creates a new cilium API client
+func New(transport runtime.ClientTransport, formats strfmt.Registry) *CiliumAPI {
+ // ensure nullable parameters have default
+ if formats == nil {
+ formats = strfmt.Default
+ }
+
+ cli := new(CiliumAPI)
+ cli.Transport = transport
+ cli.Bgp = bgp.New(transport, formats)
+ cli.Daemon = daemon.New(transport, formats)
+ cli.Endpoint = endpoint.New(transport, formats)
+ cli.Ipam = ipam.New(transport, formats)
+ cli.Metrics = metrics.New(transport, formats)
+ cli.Policy = policy.New(transport, formats)
+ cli.Prefilter = prefilter.New(transport, formats)
+ cli.Recorder = recorder.New(transport, formats)
+ cli.Service = service.New(transport, formats)
+ cli.Statedb = statedb.New(transport, formats)
+ return cli
+}
+
+// DefaultTransportConfig creates a TransportConfig with the
+// default settings taken from the meta section of the spec file.
+func DefaultTransportConfig() *TransportConfig {
+ return &TransportConfig{
+ Host: DefaultHost,
+ BasePath: DefaultBasePath,
+ Schemes: DefaultSchemes,
+ }
+}
+
+// TransportConfig contains the transport related info,
+// found in the meta section of the spec file.
+type TransportConfig struct {
+ Host string
+ BasePath string
+ Schemes []string
+}
+
+// WithHost overrides the default host,
+// provided by the meta section of the spec file.
+func (cfg *TransportConfig) WithHost(host string) *TransportConfig {
+ cfg.Host = host
+ return cfg
+}
+
+// WithBasePath overrides the default basePath,
+// provided by the meta section of the spec file.
+func (cfg *TransportConfig) WithBasePath(basePath string) *TransportConfig {
+ cfg.BasePath = basePath
+ return cfg
+}
+
+// WithSchemes overrides the default schemes,
+// provided by the meta section of the spec file.
+func (cfg *TransportConfig) WithSchemes(schemes []string) *TransportConfig {
+ cfg.Schemes = schemes
+ return cfg
+}
+
+// CiliumAPI is a client for cilium API
+type CiliumAPI struct {
+ Bgp bgp.ClientService
+
+ Daemon daemon.ClientService
+
+ Endpoint endpoint.ClientService
+
+ Ipam ipam.ClientService
+
+ Metrics metrics.ClientService
+
+ Policy policy.ClientService
+
+ Prefilter prefilter.ClientService
+
+ Recorder recorder.ClientService
+
+ Service service.ClientService
+
+ Statedb statedb.ClientService
+
+ Transport runtime.ClientTransport
+}
+
+// SetTransport changes the transport on the client and all its subresources
+func (c *CiliumAPI) SetTransport(transport runtime.ClientTransport) {
+ c.Transport = transport
+ c.Bgp.SetTransport(transport)
+ c.Daemon.SetTransport(transport)
+ c.Endpoint.SetTransport(transport)
+ c.Ipam.SetTransport(transport)
+ c.Metrics.SetTransport(transport)
+ c.Policy.SetTransport(transport)
+ c.Prefilter.SetTransport(transport)
+ c.Recorder.SetTransport(transport)
+ c.Service.SetTransport(transport)
+ c.Statedb.SetTransport(transport)
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/daemon_client.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/daemon_client.go
new file mode 100644
index 000000000..650a1cf38
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/daemon/daemon_client.go
@@ -0,0 +1,501 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package daemon
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+)
+
+// New creates a new daemon API client.
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
+ return &Client{transport: transport, formats: formats}
+}
+
+/*
+Client for daemon API
+*/
+type Client struct {
+ transport runtime.ClientTransport
+ formats strfmt.Registry
+}
+
+// ClientOption is the option for Client methods
+type ClientOption func(*runtime.ClientOperation)
+
+// ClientService is the interface for Client methods
+type ClientService interface {
+ GetCgroupDumpMetadata(params *GetCgroupDumpMetadataParams, opts ...ClientOption) (*GetCgroupDumpMetadataOK, error)
+
+ GetClusterNodes(params *GetClusterNodesParams, opts ...ClientOption) (*GetClusterNodesOK, error)
+
+ GetConfig(params *GetConfigParams, opts ...ClientOption) (*GetConfigOK, error)
+
+ GetDebuginfo(params *GetDebuginfoParams, opts ...ClientOption) (*GetDebuginfoOK, error)
+
+ GetHealth(params *GetHealthParams, opts ...ClientOption) (*GetHealthOK, error)
+
+ GetHealthz(params *GetHealthzParams, opts ...ClientOption) (*GetHealthzOK, error)
+
+ GetMap(params *GetMapParams, opts ...ClientOption) (*GetMapOK, error)
+
+ GetMapName(params *GetMapNameParams, opts ...ClientOption) (*GetMapNameOK, error)
+
+ GetMapNameEvents(params *GetMapNameEventsParams, writer io.Writer, opts ...ClientOption) (*GetMapNameEventsOK, error)
+
+ GetNodeIds(params *GetNodeIdsParams, opts ...ClientOption) (*GetNodeIdsOK, error)
+
+ PatchConfig(params *PatchConfigParams, opts ...ClientOption) (*PatchConfigOK, error)
+
+ SetTransport(transport runtime.ClientTransport)
+}
+
+/*
+GetCgroupDumpMetadata retrieves cgroup metadata for all pods
+*/
+func (a *Client) GetCgroupDumpMetadata(params *GetCgroupDumpMetadataParams, opts ...ClientOption) (*GetCgroupDumpMetadataOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetCgroupDumpMetadataParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetCgroupDumpMetadata",
+ Method: "GET",
+ PathPattern: "/cgroup-dump-metadata",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetCgroupDumpMetadataReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetCgroupDumpMetadataOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetCgroupDumpMetadata: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+GetClusterNodes gets nodes information stored in the cilium agent
+*/
+func (a *Client) GetClusterNodes(params *GetClusterNodesParams, opts ...ClientOption) (*GetClusterNodesOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetClusterNodesParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetClusterNodes",
+ Method: "GET",
+ PathPattern: "/cluster/nodes",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetClusterNodesReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetClusterNodesOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetClusterNodes: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+GetConfig gets configuration of cilium daemon
+
+Returns the configuration of the Cilium daemon.
+*/
+func (a *Client) GetConfig(params *GetConfigParams, opts ...ClientOption) (*GetConfigOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetConfigParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetConfig",
+ Method: "GET",
+ PathPattern: "/config",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetConfigReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetConfigOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetConfig: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+GetDebuginfo retrieves information about the agent and environment for debugging
+*/
+func (a *Client) GetDebuginfo(params *GetDebuginfoParams, opts ...ClientOption) (*GetDebuginfoOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetDebuginfoParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetDebuginfo",
+ Method: "GET",
+ PathPattern: "/debuginfo",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetDebuginfoReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetDebuginfoOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetDebuginfo: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+GetHealth gets modules health of cilium daemon
+
+Returns modules health and status information of the Cilium daemon.
+*/
+func (a *Client) GetHealth(params *GetHealthParams, opts ...ClientOption) (*GetHealthOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetHealthParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetHealth",
+ Method: "GET",
+ PathPattern: "/health",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetHealthReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetHealthOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetHealth: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+ GetHealthz gets health of cilium daemon
+
+ Returns health and status information of the Cilium daemon and related
+
+components such as the local container runtime, connected datastore,
+Kubernetes integration and Hubble.
+*/
+func (a *Client) GetHealthz(params *GetHealthzParams, opts ...ClientOption) (*GetHealthzOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetHealthzParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetHealthz",
+ Method: "GET",
+ PathPattern: "/healthz",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetHealthzReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetHealthzOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetHealthz: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+GetMap lists all open maps
+*/
+func (a *Client) GetMap(params *GetMapParams, opts ...ClientOption) (*GetMapOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetMapParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetMap",
+ Method: "GET",
+ PathPattern: "/map",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetMapReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetMapOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetMap: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+GetMapName retrieves contents of b p f map
+*/
+func (a *Client) GetMapName(params *GetMapNameParams, opts ...ClientOption) (*GetMapNameOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetMapNameParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetMapName",
+ Method: "GET",
+ PathPattern: "/map/{name}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetMapNameReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetMapNameOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetMapName: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+GetMapNameEvents retrieves the recent event logs associated with this endpoint
+*/
+func (a *Client) GetMapNameEvents(params *GetMapNameEventsParams, writer io.Writer, opts ...ClientOption) (*GetMapNameEventsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetMapNameEventsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetMapNameEvents",
+ Method: "GET",
+ PathPattern: "/map/{name}/events",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetMapNameEventsReader{formats: a.formats, writer: writer},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetMapNameEventsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetMapNameEvents: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+ GetNodeIds lists information about known node i ds
+
+ Retrieves a list of node IDs allocated by the agent and their
+
+associated node IP addresses.
+*/
+func (a *Client) GetNodeIds(params *GetNodeIdsParams, opts ...ClientOption) (*GetNodeIdsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetNodeIdsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetNodeIds",
+ Method: "GET",
+ PathPattern: "/node/ids",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetNodeIdsReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetNodeIdsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetNodeIds: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+ PatchConfig modifies daemon configuration
+
+ Updates the daemon configuration by applying the provided
+
+ConfigurationMap and regenerates & recompiles all required datapath
+components.
+*/
+func (a *Client) PatchConfig(params *PatchConfigParams, opts ...ClientOption) (*PatchConfigOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewPatchConfigParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "PatchConfig",
+ Method: "PATCH",
+ PathPattern: "/config",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &PatchConfigReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*PatchConfigOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for PatchConfig: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+ a.transport = transport
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_cgroup_dump_metadata_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_cgroup_dump_metadata_parameters.go
new file mode 100644
index 000000000..70b51046c
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_cgroup_dump_metadata_parameters.go
@@ -0,0 +1,131 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package daemon
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetCgroupDumpMetadataParams creates a new GetCgroupDumpMetadataParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetCgroupDumpMetadataParams() *GetCgroupDumpMetadataParams {
+ return &GetCgroupDumpMetadataParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetCgroupDumpMetadataParamsWithTimeout creates a new GetCgroupDumpMetadataParams object
+// with the ability to set a timeout on a request.
+func NewGetCgroupDumpMetadataParamsWithTimeout(timeout time.Duration) *GetCgroupDumpMetadataParams {
+ return &GetCgroupDumpMetadataParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetCgroupDumpMetadataParamsWithContext creates a new GetCgroupDumpMetadataParams object
+// with the ability to set a context for a request.
+func NewGetCgroupDumpMetadataParamsWithContext(ctx context.Context) *GetCgroupDumpMetadataParams {
+ return &GetCgroupDumpMetadataParams{
+ Context: ctx,
+ }
+}
+
+// NewGetCgroupDumpMetadataParamsWithHTTPClient creates a new GetCgroupDumpMetadataParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetCgroupDumpMetadataParamsWithHTTPClient(client *http.Client) *GetCgroupDumpMetadataParams {
+ return &GetCgroupDumpMetadataParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetCgroupDumpMetadataParams contains all the parameters to send to the API endpoint
+
+ for the get cgroup dump metadata operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetCgroupDumpMetadataParams struct {
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get cgroup dump metadata params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetCgroupDumpMetadataParams) WithDefaults() *GetCgroupDumpMetadataParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get cgroup dump metadata params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetCgroupDumpMetadataParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get cgroup dump metadata params
+func (o *GetCgroupDumpMetadataParams) WithTimeout(timeout time.Duration) *GetCgroupDumpMetadataParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get cgroup dump metadata params
+func (o *GetCgroupDumpMetadataParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get cgroup dump metadata params
+func (o *GetCgroupDumpMetadataParams) WithContext(ctx context.Context) *GetCgroupDumpMetadataParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get cgroup dump metadata params
+func (o *GetCgroupDumpMetadataParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get cgroup dump metadata params
+func (o *GetCgroupDumpMetadataParams) WithHTTPClient(client *http.Client) *GetCgroupDumpMetadataParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get cgroup dump metadata params
+func (o *GetCgroupDumpMetadataParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetCgroupDumpMetadataParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_cgroup_dump_metadata_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_cgroup_dump_metadata_responses.go
new file mode 100644
index 000000000..3c16b3697
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_cgroup_dump_metadata_responses.go
@@ -0,0 +1,168 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package daemon
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// GetCgroupDumpMetadataReader is a Reader for the GetCgroupDumpMetadata structure.
+type GetCgroupDumpMetadataReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetCgroupDumpMetadataReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetCgroupDumpMetadataOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 500:
+ result := NewGetCgroupDumpMetadataFailure()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetCgroupDumpMetadataOK creates a GetCgroupDumpMetadataOK with default headers values
+func NewGetCgroupDumpMetadataOK() *GetCgroupDumpMetadataOK {
+ return &GetCgroupDumpMetadataOK{}
+}
+
+/*
+GetCgroupDumpMetadataOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetCgroupDumpMetadataOK struct {
+ Payload *models.CgroupDumpMetadata
+}
+
+// IsSuccess returns true when this get cgroup dump metadata o k response has a 2xx status code
+func (o *GetCgroupDumpMetadataOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get cgroup dump metadata o k response has a 3xx status code
+func (o *GetCgroupDumpMetadataOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get cgroup dump metadata o k response has a 4xx status code
+func (o *GetCgroupDumpMetadataOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get cgroup dump metadata o k response has a 5xx status code
+func (o *GetCgroupDumpMetadataOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get cgroup dump metadata o k response a status code equal to that given
+func (o *GetCgroupDumpMetadataOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetCgroupDumpMetadataOK) Error() string {
+ return fmt.Sprintf("[GET /cgroup-dump-metadata][%d] getCgroupDumpMetadataOK %+v", 200, o.Payload)
+}
+
+func (o *GetCgroupDumpMetadataOK) String() string {
+ return fmt.Sprintf("[GET /cgroup-dump-metadata][%d] getCgroupDumpMetadataOK %+v", 200, o.Payload)
+}
+
+func (o *GetCgroupDumpMetadataOK) GetPayload() *models.CgroupDumpMetadata {
+ return o.Payload
+}
+
+func (o *GetCgroupDumpMetadataOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.CgroupDumpMetadata)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetCgroupDumpMetadataFailure creates a GetCgroupDumpMetadataFailure with default headers values
+func NewGetCgroupDumpMetadataFailure() *GetCgroupDumpMetadataFailure {
+ return &GetCgroupDumpMetadataFailure{}
+}
+
+/*
+GetCgroupDumpMetadataFailure describes a response with status code 500, with default header values.
+
+CgroupDumpMetadata get failed
+*/
+type GetCgroupDumpMetadataFailure struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this get cgroup dump metadata failure response has a 2xx status code
+func (o *GetCgroupDumpMetadataFailure) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get cgroup dump metadata failure response has a 3xx status code
+func (o *GetCgroupDumpMetadataFailure) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get cgroup dump metadata failure response has a 4xx status code
+func (o *GetCgroupDumpMetadataFailure) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get cgroup dump metadata failure response has a 5xx status code
+func (o *GetCgroupDumpMetadataFailure) IsServerError() bool {
+ return true
+}
+
+// IsCode returns true when this get cgroup dump metadata failure response a status code equal to that given
+func (o *GetCgroupDumpMetadataFailure) IsCode(code int) bool {
+ return code == 500
+}
+
+func (o *GetCgroupDumpMetadataFailure) Error() string {
+ return fmt.Sprintf("[GET /cgroup-dump-metadata][%d] getCgroupDumpMetadataFailure %+v", 500, o.Payload)
+}
+
+func (o *GetCgroupDumpMetadataFailure) String() string {
+ return fmt.Sprintf("[GET /cgroup-dump-metadata][%d] getCgroupDumpMetadataFailure %+v", 500, o.Payload)
+}
+
+func (o *GetCgroupDumpMetadataFailure) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *GetCgroupDumpMetadataFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_cluster_nodes_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_cluster_nodes_parameters.go
new file mode 100644
index 000000000..f99962195
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_cluster_nodes_parameters.go
@@ -0,0 +1,161 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package daemon
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// NewGetClusterNodesParams creates a new GetClusterNodesParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetClusterNodesParams() *GetClusterNodesParams {
+ return &GetClusterNodesParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetClusterNodesParamsWithTimeout creates a new GetClusterNodesParams object
+// with the ability to set a timeout on a request.
+func NewGetClusterNodesParamsWithTimeout(timeout time.Duration) *GetClusterNodesParams {
+ return &GetClusterNodesParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetClusterNodesParamsWithContext creates a new GetClusterNodesParams object
+// with the ability to set a context for a request.
+func NewGetClusterNodesParamsWithContext(ctx context.Context) *GetClusterNodesParams {
+ return &GetClusterNodesParams{
+ Context: ctx,
+ }
+}
+
+// NewGetClusterNodesParamsWithHTTPClient creates a new GetClusterNodesParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetClusterNodesParamsWithHTTPClient(client *http.Client) *GetClusterNodesParams {
+ return &GetClusterNodesParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetClusterNodesParams contains all the parameters to send to the API endpoint
+
+ for the get cluster nodes operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetClusterNodesParams struct {
+
+ /* ClientID.
+
+ Client UUID should be used when the client wants to request
+ a diff of nodes added and / or removed since the last time
+ that client has made a request.
+
+ */
+ ClientID *int64
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get cluster nodes params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetClusterNodesParams) WithDefaults() *GetClusterNodesParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get cluster nodes params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetClusterNodesParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get cluster nodes params
+func (o *GetClusterNodesParams) WithTimeout(timeout time.Duration) *GetClusterNodesParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get cluster nodes params
+func (o *GetClusterNodesParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get cluster nodes params
+func (o *GetClusterNodesParams) WithContext(ctx context.Context) *GetClusterNodesParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get cluster nodes params
+func (o *GetClusterNodesParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get cluster nodes params
+func (o *GetClusterNodesParams) WithHTTPClient(client *http.Client) *GetClusterNodesParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get cluster nodes params
+func (o *GetClusterNodesParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithClientID adds the clientID to the get cluster nodes params
+func (o *GetClusterNodesParams) WithClientID(clientID *int64) *GetClusterNodesParams {
+ o.SetClientID(clientID)
+ return o
+}
+
+// SetClientID adds the clientId to the get cluster nodes params
+func (o *GetClusterNodesParams) SetClientID(clientID *int64) {
+ o.ClientID = clientID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetClusterNodesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if o.ClientID != nil {
+
+ // header param client-id
+ if err := r.SetHeaderParam("client-id", swag.FormatInt64(*o.ClientID)); err != nil {
+ return err
+ }
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_cluster_nodes_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_cluster_nodes_responses.go
new file mode 100644
index 000000000..2f767bbf5
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_cluster_nodes_responses.go
@@ -0,0 +1,101 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package daemon
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// GetClusterNodesReader is a Reader for the GetClusterNodes structure.
+type GetClusterNodesReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetClusterNodesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetClusterNodesOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetClusterNodesOK creates a GetClusterNodesOK with default headers values
+func NewGetClusterNodesOK() *GetClusterNodesOK {
+ return &GetClusterNodesOK{}
+}
+
+/*
+GetClusterNodesOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetClusterNodesOK struct {
+ Payload *models.ClusterNodeStatus
+}
+
+// IsSuccess returns true when this get cluster nodes o k response has a 2xx status code
+func (o *GetClusterNodesOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get cluster nodes o k response has a 3xx status code
+func (o *GetClusterNodesOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get cluster nodes o k response has a 4xx status code
+func (o *GetClusterNodesOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get cluster nodes o k response has a 5xx status code
+func (o *GetClusterNodesOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get cluster nodes o k response a status code equal to that given
+func (o *GetClusterNodesOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetClusterNodesOK) Error() string {
+ return fmt.Sprintf("[GET /cluster/nodes][%d] getClusterNodesOK %+v", 200, o.Payload)
+}
+
+func (o *GetClusterNodesOK) String() string {
+ return fmt.Sprintf("[GET /cluster/nodes][%d] getClusterNodesOK %+v", 200, o.Payload)
+}
+
+func (o *GetClusterNodesOK) GetPayload() *models.ClusterNodeStatus {
+ return o.Payload
+}
+
+func (o *GetClusterNodesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.ClusterNodeStatus)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_config_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_config_parameters.go
new file mode 100644
index 000000000..e741bd356
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_config_parameters.go
@@ -0,0 +1,131 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package daemon
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetConfigParams creates a new GetConfigParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetConfigParams() *GetConfigParams {
+ return &GetConfigParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetConfigParamsWithTimeout creates a new GetConfigParams object
+// with the ability to set a timeout on a request.
+func NewGetConfigParamsWithTimeout(timeout time.Duration) *GetConfigParams {
+ return &GetConfigParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetConfigParamsWithContext creates a new GetConfigParams object
+// with the ability to set a context for a request.
+func NewGetConfigParamsWithContext(ctx context.Context) *GetConfigParams {
+ return &GetConfigParams{
+ Context: ctx,
+ }
+}
+
+// NewGetConfigParamsWithHTTPClient creates a new GetConfigParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetConfigParamsWithHTTPClient(client *http.Client) *GetConfigParams {
+ return &GetConfigParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetConfigParams contains all the parameters to send to the API endpoint
+
+ for the get config operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetConfigParams struct {
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get config params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetConfigParams) WithDefaults() *GetConfigParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get config params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetConfigParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get config params
+func (o *GetConfigParams) WithTimeout(timeout time.Duration) *GetConfigParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get config params
+func (o *GetConfigParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get config params
+func (o *GetConfigParams) WithContext(ctx context.Context) *GetConfigParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get config params
+func (o *GetConfigParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get config params
+func (o *GetConfigParams) WithHTTPClient(client *http.Client) *GetConfigParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get config params
+func (o *GetConfigParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetConfigParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_config_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_config_responses.go
new file mode 100644
index 000000000..10da9ace5
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_config_responses.go
@@ -0,0 +1,101 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package daemon
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// GetConfigReader is a Reader for the GetConfig structure.
+type GetConfigReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetConfigReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetConfigOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetConfigOK creates a GetConfigOK with default headers values
+func NewGetConfigOK() *GetConfigOK {
+ return &GetConfigOK{}
+}
+
+/*
+GetConfigOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetConfigOK struct {
+ Payload *models.DaemonConfiguration
+}
+
+// IsSuccess returns true when this get config o k response has a 2xx status code
+func (o *GetConfigOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get config o k response has a 3xx status code
+func (o *GetConfigOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get config o k response has a 4xx status code
+func (o *GetConfigOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get config o k response has a 5xx status code
+func (o *GetConfigOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get config o k response a status code equal to that given
+func (o *GetConfigOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetConfigOK) Error() string {
+ return fmt.Sprintf("[GET /config][%d] getConfigOK %+v", 200, o.Payload)
+}
+
+func (o *GetConfigOK) String() string {
+ return fmt.Sprintf("[GET /config][%d] getConfigOK %+v", 200, o.Payload)
+}
+
+func (o *GetConfigOK) GetPayload() *models.DaemonConfiguration {
+ return o.Payload
+}
+
+func (o *GetConfigOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.DaemonConfiguration)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_debuginfo_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_debuginfo_parameters.go
new file mode 100644
index 000000000..28eed7d3a
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_debuginfo_parameters.go
@@ -0,0 +1,131 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package daemon
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetDebuginfoParams creates a new GetDebuginfoParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetDebuginfoParams() *GetDebuginfoParams {
+ return &GetDebuginfoParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetDebuginfoParamsWithTimeout creates a new GetDebuginfoParams object
+// with the ability to set a timeout on a request.
+func NewGetDebuginfoParamsWithTimeout(timeout time.Duration) *GetDebuginfoParams {
+ return &GetDebuginfoParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetDebuginfoParamsWithContext creates a new GetDebuginfoParams object
+// with the ability to set a context for a request.
+func NewGetDebuginfoParamsWithContext(ctx context.Context) *GetDebuginfoParams {
+ return &GetDebuginfoParams{
+ Context: ctx,
+ }
+}
+
+// NewGetDebuginfoParamsWithHTTPClient creates a new GetDebuginfoParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetDebuginfoParamsWithHTTPClient(client *http.Client) *GetDebuginfoParams {
+ return &GetDebuginfoParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetDebuginfoParams contains all the parameters to send to the API endpoint
+
+ for the get debuginfo operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetDebuginfoParams struct {
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get debuginfo params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetDebuginfoParams) WithDefaults() *GetDebuginfoParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get debuginfo params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetDebuginfoParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get debuginfo params
+func (o *GetDebuginfoParams) WithTimeout(timeout time.Duration) *GetDebuginfoParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get debuginfo params
+func (o *GetDebuginfoParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get debuginfo params
+func (o *GetDebuginfoParams) WithContext(ctx context.Context) *GetDebuginfoParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get debuginfo params
+func (o *GetDebuginfoParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get debuginfo params
+func (o *GetDebuginfoParams) WithHTTPClient(client *http.Client) *GetDebuginfoParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get debuginfo params
+func (o *GetDebuginfoParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetDebuginfoParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_debuginfo_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_debuginfo_responses.go
new file mode 100644
index 000000000..a7fa07a31
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_debuginfo_responses.go
@@ -0,0 +1,168 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package daemon
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// GetDebuginfoReader is a Reader for the GetDebuginfo structure.
+type GetDebuginfoReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetDebuginfoReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetDebuginfoOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 500:
+ result := NewGetDebuginfoFailure()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetDebuginfoOK creates a GetDebuginfoOK with default headers values
+func NewGetDebuginfoOK() *GetDebuginfoOK {
+ return &GetDebuginfoOK{}
+}
+
+/*
+GetDebuginfoOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetDebuginfoOK struct {
+ Payload *models.DebugInfo
+}
+
+// IsSuccess returns true when this get debuginfo o k response has a 2xx status code
+func (o *GetDebuginfoOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get debuginfo o k response has a 3xx status code
+func (o *GetDebuginfoOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get debuginfo o k response has a 4xx status code
+func (o *GetDebuginfoOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get debuginfo o k response has a 5xx status code
+func (o *GetDebuginfoOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get debuginfo o k response a status code equal to that given
+func (o *GetDebuginfoOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetDebuginfoOK) Error() string {
+ return fmt.Sprintf("[GET /debuginfo][%d] getDebuginfoOK %+v", 200, o.Payload)
+}
+
+func (o *GetDebuginfoOK) String() string {
+ return fmt.Sprintf("[GET /debuginfo][%d] getDebuginfoOK %+v", 200, o.Payload)
+}
+
+func (o *GetDebuginfoOK) GetPayload() *models.DebugInfo {
+ return o.Payload
+}
+
+func (o *GetDebuginfoOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.DebugInfo)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetDebuginfoFailure creates a GetDebuginfoFailure with default headers values
+func NewGetDebuginfoFailure() *GetDebuginfoFailure {
+ return &GetDebuginfoFailure{}
+}
+
+/*
+GetDebuginfoFailure describes a response with status code 500, with default header values.
+
+DebugInfo get failed
+*/
+type GetDebuginfoFailure struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this get debuginfo failure response has a 2xx status code
+func (o *GetDebuginfoFailure) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get debuginfo failure response has a 3xx status code
+func (o *GetDebuginfoFailure) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get debuginfo failure response has a 4xx status code
+func (o *GetDebuginfoFailure) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get debuginfo failure response has a 5xx status code
+func (o *GetDebuginfoFailure) IsServerError() bool {
+ return true
+}
+
+// IsCode returns true when this get debuginfo failure response a status code equal to that given
+func (o *GetDebuginfoFailure) IsCode(code int) bool {
+ return code == 500
+}
+
+func (o *GetDebuginfoFailure) Error() string {
+ return fmt.Sprintf("[GET /debuginfo][%d] getDebuginfoFailure %+v", 500, o.Payload)
+}
+
+func (o *GetDebuginfoFailure) String() string {
+ return fmt.Sprintf("[GET /debuginfo][%d] getDebuginfoFailure %+v", 500, o.Payload)
+}
+
+func (o *GetDebuginfoFailure) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *GetDebuginfoFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_health_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_health_parameters.go
new file mode 100644
index 000000000..dba352258
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_health_parameters.go
@@ -0,0 +1,159 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package daemon
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// NewGetHealthParams creates a new GetHealthParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetHealthParams() *GetHealthParams {
+ return &GetHealthParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetHealthParamsWithTimeout creates a new GetHealthParams object
+// with the ability to set a timeout on a request.
+func NewGetHealthParamsWithTimeout(timeout time.Duration) *GetHealthParams {
+ return &GetHealthParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetHealthParamsWithContext creates a new GetHealthParams object
+// with the ability to set a context for a request.
+func NewGetHealthParamsWithContext(ctx context.Context) *GetHealthParams {
+ return &GetHealthParams{
+ Context: ctx,
+ }
+}
+
+// NewGetHealthParamsWithHTTPClient creates a new GetHealthParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetHealthParamsWithHTTPClient(client *http.Client) *GetHealthParams {
+ return &GetHealthParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetHealthParams contains all the parameters to send to the API endpoint
+
+ for the get health operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetHealthParams struct {
+
+ /* Brief.
+
+ Brief is a brief representation of the Cilium status.
+
+ */
+ Brief *bool
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get health params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetHealthParams) WithDefaults() *GetHealthParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get health params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetHealthParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get health params
+func (o *GetHealthParams) WithTimeout(timeout time.Duration) *GetHealthParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get health params
+func (o *GetHealthParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get health params
+func (o *GetHealthParams) WithContext(ctx context.Context) *GetHealthParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get health params
+func (o *GetHealthParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get health params
+func (o *GetHealthParams) WithHTTPClient(client *http.Client) *GetHealthParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get health params
+func (o *GetHealthParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBrief adds the brief to the get health params
+func (o *GetHealthParams) WithBrief(brief *bool) *GetHealthParams {
+ o.SetBrief(brief)
+ return o
+}
+
+// SetBrief adds the brief to the get health params
+func (o *GetHealthParams) SetBrief(brief *bool) {
+ o.Brief = brief
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetHealthParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if o.Brief != nil {
+
+ // header param brief
+ if err := r.SetHeaderParam("brief", swag.FormatBool(*o.Brief)); err != nil {
+ return err
+ }
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_health_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_health_responses.go
new file mode 100644
index 000000000..17a878fa1
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_health_responses.go
@@ -0,0 +1,101 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package daemon
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// GetHealthReader is a Reader for the GetHealth structure.
+type GetHealthReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetHealthReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetHealthOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetHealthOK creates a GetHealthOK with default headers values
+func NewGetHealthOK() *GetHealthOK {
+ return &GetHealthOK{}
+}
+
+/*
+GetHealthOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetHealthOK struct {
+ Payload *models.ModulesHealth
+}
+
+// IsSuccess returns true when this get health o k response has a 2xx status code
+func (o *GetHealthOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get health o k response has a 3xx status code
+func (o *GetHealthOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get health o k response has a 4xx status code
+func (o *GetHealthOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get health o k response has a 5xx status code
+func (o *GetHealthOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get health o k response a status code equal to that given
+func (o *GetHealthOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetHealthOK) Error() string {
+ return fmt.Sprintf("[GET /health][%d] getHealthOK %+v", 200, o.Payload)
+}
+
+func (o *GetHealthOK) String() string {
+ return fmt.Sprintf("[GET /health][%d] getHealthOK %+v", 200, o.Payload)
+}
+
+func (o *GetHealthOK) GetPayload() *models.ModulesHealth {
+ return o.Payload
+}
+
+func (o *GetHealthOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.ModulesHealth)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_healthz_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_healthz_parameters.go
new file mode 100644
index 000000000..235c69da3
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_healthz_parameters.go
@@ -0,0 +1,159 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package daemon
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// NewGetHealthzParams creates a new GetHealthzParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetHealthzParams() *GetHealthzParams {
+ return &GetHealthzParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetHealthzParamsWithTimeout creates a new GetHealthzParams object
+// with the ability to set a timeout on a request.
+func NewGetHealthzParamsWithTimeout(timeout time.Duration) *GetHealthzParams {
+ return &GetHealthzParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetHealthzParamsWithContext creates a new GetHealthzParams object
+// with the ability to set a context for a request.
+func NewGetHealthzParamsWithContext(ctx context.Context) *GetHealthzParams {
+ return &GetHealthzParams{
+ Context: ctx,
+ }
+}
+
+// NewGetHealthzParamsWithHTTPClient creates a new GetHealthzParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetHealthzParamsWithHTTPClient(client *http.Client) *GetHealthzParams {
+ return &GetHealthzParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetHealthzParams contains all the parameters to send to the API endpoint
+
+ for the get healthz operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetHealthzParams struct {
+
+ /* Brief.
+
+ Brief will return a brief representation of the Cilium status.
+
+ */
+ Brief *bool
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get healthz params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetHealthzParams) WithDefaults() *GetHealthzParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get healthz params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetHealthzParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get healthz params
+func (o *GetHealthzParams) WithTimeout(timeout time.Duration) *GetHealthzParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get healthz params
+func (o *GetHealthzParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get healthz params
+func (o *GetHealthzParams) WithContext(ctx context.Context) *GetHealthzParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get healthz params
+func (o *GetHealthzParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get healthz params
+func (o *GetHealthzParams) WithHTTPClient(client *http.Client) *GetHealthzParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get healthz params
+func (o *GetHealthzParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithBrief adds the brief to the get healthz params
+func (o *GetHealthzParams) WithBrief(brief *bool) *GetHealthzParams {
+ o.SetBrief(brief)
+ return o
+}
+
+// SetBrief adds the brief to the get healthz params
+func (o *GetHealthzParams) SetBrief(brief *bool) {
+ o.Brief = brief
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetHealthzParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if o.Brief != nil {
+
+ // header param brief
+ if err := r.SetHeaderParam("brief", swag.FormatBool(*o.Brief)); err != nil {
+ return err
+ }
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_healthz_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_healthz_responses.go
new file mode 100644
index 000000000..c3b73feea
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_healthz_responses.go
@@ -0,0 +1,101 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package daemon
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// GetHealthzReader is a Reader for the GetHealthz structure.
+type GetHealthzReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetHealthzReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetHealthzOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetHealthzOK creates a GetHealthzOK with default headers values
+func NewGetHealthzOK() *GetHealthzOK {
+ return &GetHealthzOK{}
+}
+
+/*
+GetHealthzOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetHealthzOK struct {
+ Payload *models.StatusResponse
+}
+
+// IsSuccess returns true when this get healthz o k response has a 2xx status code
+func (o *GetHealthzOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get healthz o k response has a 3xx status code
+func (o *GetHealthzOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get healthz o k response has a 4xx status code
+func (o *GetHealthzOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get healthz o k response has a 5xx status code
+func (o *GetHealthzOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get healthz o k response a status code equal to that given
+func (o *GetHealthzOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetHealthzOK) Error() string {
+ return fmt.Sprintf("[GET /healthz][%d] getHealthzOK %+v", 200, o.Payload)
+}
+
+func (o *GetHealthzOK) String() string {
+ return fmt.Sprintf("[GET /healthz][%d] getHealthzOK %+v", 200, o.Payload)
+}
+
+func (o *GetHealthzOK) GetPayload() *models.StatusResponse {
+ return o.Payload
+}
+
+func (o *GetHealthzOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.StatusResponse)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_name_events_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_name_events_parameters.go
new file mode 100644
index 000000000..41520f507
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_name_events_parameters.go
@@ -0,0 +1,189 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package daemon
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// NewGetMapNameEventsParams creates a new GetMapNameEventsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetMapNameEventsParams() *GetMapNameEventsParams {
+ return &GetMapNameEventsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetMapNameEventsParamsWithTimeout creates a new GetMapNameEventsParams object
+// with the ability to set a timeout on a request.
+func NewGetMapNameEventsParamsWithTimeout(timeout time.Duration) *GetMapNameEventsParams {
+ return &GetMapNameEventsParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetMapNameEventsParamsWithContext creates a new GetMapNameEventsParams object
+// with the ability to set a context for a request.
+func NewGetMapNameEventsParamsWithContext(ctx context.Context) *GetMapNameEventsParams {
+ return &GetMapNameEventsParams{
+ Context: ctx,
+ }
+}
+
+// NewGetMapNameEventsParamsWithHTTPClient creates a new GetMapNameEventsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetMapNameEventsParamsWithHTTPClient(client *http.Client) *GetMapNameEventsParams {
+ return &GetMapNameEventsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetMapNameEventsParams contains all the parameters to send to the API endpoint
+
+ for the get map name events operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetMapNameEventsParams struct {
+
+ /* Follow.
+
+ Whether to follow streamed requests
+ */
+ Follow *bool
+
+ /* Name.
+
+ Name of map
+ */
+ Name string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get map name events params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetMapNameEventsParams) WithDefaults() *GetMapNameEventsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get map name events params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetMapNameEventsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get map name events params
+func (o *GetMapNameEventsParams) WithTimeout(timeout time.Duration) *GetMapNameEventsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get map name events params
+func (o *GetMapNameEventsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get map name events params
+func (o *GetMapNameEventsParams) WithContext(ctx context.Context) *GetMapNameEventsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get map name events params
+func (o *GetMapNameEventsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get map name events params
+func (o *GetMapNameEventsParams) WithHTTPClient(client *http.Client) *GetMapNameEventsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get map name events params
+func (o *GetMapNameEventsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithFollow adds the follow to the get map name events params
+func (o *GetMapNameEventsParams) WithFollow(follow *bool) *GetMapNameEventsParams {
+ o.SetFollow(follow)
+ return o
+}
+
+// SetFollow adds the follow to the get map name events params
+func (o *GetMapNameEventsParams) SetFollow(follow *bool) {
+ o.Follow = follow
+}
+
+// WithName adds the name to the get map name events params
+func (o *GetMapNameEventsParams) WithName(name string) *GetMapNameEventsParams {
+ o.SetName(name)
+ return o
+}
+
+// SetName adds the name to the get map name events params
+func (o *GetMapNameEventsParams) SetName(name string) {
+ o.Name = name
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetMapNameEventsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if o.Follow != nil {
+
+ // query param follow
+ var qrFollow bool
+
+ if o.Follow != nil {
+ qrFollow = *o.Follow
+ }
+ qFollow := swag.FormatBool(qrFollow)
+ if qFollow != "" {
+
+ if err := r.SetQueryParam("follow", qFollow); err != nil {
+ return err
+ }
+ }
+ }
+
+ // path param name
+ if err := r.SetPathParam("name", o.Name); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_name_events_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_name_events_responses.go
new file mode 100644
index 000000000..6dc187478
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_name_events_responses.go
@@ -0,0 +1,158 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package daemon
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+)
+
+// GetMapNameEventsReader is a Reader for the GetMapNameEvents structure.
+type GetMapNameEventsReader struct {
+ formats strfmt.Registry
+ writer io.Writer
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetMapNameEventsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetMapNameEventsOK(o.writer)
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 404:
+ result := NewGetMapNameEventsNotFound()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetMapNameEventsOK creates a GetMapNameEventsOK with default headers values
+func NewGetMapNameEventsOK(writer io.Writer) *GetMapNameEventsOK {
+ return &GetMapNameEventsOK{
+
+ Payload: writer,
+ }
+}
+
+/*
+GetMapNameEventsOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetMapNameEventsOK struct {
+ Payload io.Writer
+}
+
+// IsSuccess returns true when this get map name events o k response has a 2xx status code
+func (o *GetMapNameEventsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get map name events o k response has a 3xx status code
+func (o *GetMapNameEventsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get map name events o k response has a 4xx status code
+func (o *GetMapNameEventsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get map name events o k response has a 5xx status code
+func (o *GetMapNameEventsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get map name events o k response a status code equal to that given
+func (o *GetMapNameEventsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetMapNameEventsOK) Error() string {
+ return fmt.Sprintf("[GET /map/{name}/events][%d] getMapNameEventsOK %+v", 200, o.Payload)
+}
+
+func (o *GetMapNameEventsOK) String() string {
+ return fmt.Sprintf("[GET /map/{name}/events][%d] getMapNameEventsOK %+v", 200, o.Payload)
+}
+
+func (o *GetMapNameEventsOK) GetPayload() io.Writer {
+ return o.Payload
+}
+
+func (o *GetMapNameEventsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetMapNameEventsNotFound creates a GetMapNameEventsNotFound with default headers values
+func NewGetMapNameEventsNotFound() *GetMapNameEventsNotFound {
+ return &GetMapNameEventsNotFound{}
+}
+
+/*
+GetMapNameEventsNotFound describes a response with status code 404, with default header values.
+
+Map not found
+*/
+type GetMapNameEventsNotFound struct {
+}
+
+// IsSuccess returns true when this get map name events not found response has a 2xx status code
+func (o *GetMapNameEventsNotFound) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get map name events not found response has a 3xx status code
+func (o *GetMapNameEventsNotFound) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get map name events not found response has a 4xx status code
+func (o *GetMapNameEventsNotFound) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get map name events not found response has a 5xx status code
+func (o *GetMapNameEventsNotFound) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get map name events not found response a status code equal to that given
+func (o *GetMapNameEventsNotFound) IsCode(code int) bool {
+ return code == 404
+}
+
+func (o *GetMapNameEventsNotFound) Error() string {
+ return fmt.Sprintf("[GET /map/{name}/events][%d] getMapNameEventsNotFound ", 404)
+}
+
+func (o *GetMapNameEventsNotFound) String() string {
+ return fmt.Sprintf("[GET /map/{name}/events][%d] getMapNameEventsNotFound ", 404)
+}
+
+func (o *GetMapNameEventsNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_name_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_name_parameters.go
new file mode 100644
index 000000000..9465ac2bb
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_name_parameters.go
@@ -0,0 +1,154 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package daemon
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetMapNameParams creates a new GetMapNameParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetMapNameParams() *GetMapNameParams {
+ return &GetMapNameParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetMapNameParamsWithTimeout creates a new GetMapNameParams object
+// with the ability to set a timeout on a request.
+func NewGetMapNameParamsWithTimeout(timeout time.Duration) *GetMapNameParams {
+ return &GetMapNameParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetMapNameParamsWithContext creates a new GetMapNameParams object
+// with the ability to set a context for a request.
+func NewGetMapNameParamsWithContext(ctx context.Context) *GetMapNameParams {
+ return &GetMapNameParams{
+ Context: ctx,
+ }
+}
+
+// NewGetMapNameParamsWithHTTPClient creates a new GetMapNameParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetMapNameParamsWithHTTPClient(client *http.Client) *GetMapNameParams {
+ return &GetMapNameParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetMapNameParams contains all the parameters to send to the API endpoint
+
+ for the get map name operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetMapNameParams struct {
+
+ /* Name.
+
+ Name of map
+ */
+ Name string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get map name params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetMapNameParams) WithDefaults() *GetMapNameParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get map name params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetMapNameParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get map name params
+func (o *GetMapNameParams) WithTimeout(timeout time.Duration) *GetMapNameParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get map name params
+func (o *GetMapNameParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get map name params
+func (o *GetMapNameParams) WithContext(ctx context.Context) *GetMapNameParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get map name params
+func (o *GetMapNameParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get map name params
+func (o *GetMapNameParams) WithHTTPClient(client *http.Client) *GetMapNameParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get map name params
+func (o *GetMapNameParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithName adds the name to the get map name params
+func (o *GetMapNameParams) WithName(name string) *GetMapNameParams {
+ o.SetName(name)
+ return o
+}
+
+// SetName adds the name to the get map name params
+func (o *GetMapNameParams) SetName(name string) {
+ o.Name = name
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetMapNameParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param name
+ if err := r.SetPathParam("name", o.Name); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_name_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_name_responses.go
new file mode 100644
index 000000000..f03de4ce3
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_name_responses.go
@@ -0,0 +1,158 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package daemon
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// GetMapNameReader is a Reader for the GetMapName structure.
+type GetMapNameReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetMapNameReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetMapNameOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 404:
+ result := NewGetMapNameNotFound()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetMapNameOK creates a GetMapNameOK with default headers values
+func NewGetMapNameOK() *GetMapNameOK {
+ return &GetMapNameOK{}
+}
+
+/*
+GetMapNameOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetMapNameOK struct {
+ Payload *models.BPFMap
+}
+
+// IsSuccess returns true when this get map name o k response has a 2xx status code
+func (o *GetMapNameOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get map name o k response has a 3xx status code
+func (o *GetMapNameOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get map name o k response has a 4xx status code
+func (o *GetMapNameOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get map name o k response has a 5xx status code
+func (o *GetMapNameOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get map name o k response a status code equal to that given
+func (o *GetMapNameOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetMapNameOK) Error() string {
+ return fmt.Sprintf("[GET /map/{name}][%d] getMapNameOK %+v", 200, o.Payload)
+}
+
+func (o *GetMapNameOK) String() string {
+ return fmt.Sprintf("[GET /map/{name}][%d] getMapNameOK %+v", 200, o.Payload)
+}
+
+func (o *GetMapNameOK) GetPayload() *models.BPFMap {
+ return o.Payload
+}
+
+func (o *GetMapNameOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.BPFMap)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetMapNameNotFound creates a GetMapNameNotFound with default headers values
+func NewGetMapNameNotFound() *GetMapNameNotFound {
+ return &GetMapNameNotFound{}
+}
+
+/*
+GetMapNameNotFound describes a response with status code 404, with default header values.
+
+Map not found
+*/
+type GetMapNameNotFound struct {
+}
+
+// IsSuccess returns true when this get map name not found response has a 2xx status code
+func (o *GetMapNameNotFound) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get map name not found response has a 3xx status code
+func (o *GetMapNameNotFound) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get map name not found response has a 4xx status code
+func (o *GetMapNameNotFound) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get map name not found response has a 5xx status code
+func (o *GetMapNameNotFound) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get map name not found response a status code equal to that given
+func (o *GetMapNameNotFound) IsCode(code int) bool {
+ return code == 404
+}
+
+func (o *GetMapNameNotFound) Error() string {
+ return fmt.Sprintf("[GET /map/{name}][%d] getMapNameNotFound ", 404)
+}
+
+func (o *GetMapNameNotFound) String() string {
+ return fmt.Sprintf("[GET /map/{name}][%d] getMapNameNotFound ", 404)
+}
+
+func (o *GetMapNameNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_parameters.go
new file mode 100644
index 000000000..05cca29d7
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_parameters.go
@@ -0,0 +1,131 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package daemon
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetMapParams creates a new GetMapParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetMapParams() *GetMapParams {
+ return &GetMapParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetMapParamsWithTimeout creates a new GetMapParams object
+// with the ability to set a timeout on a request.
+func NewGetMapParamsWithTimeout(timeout time.Duration) *GetMapParams {
+ return &GetMapParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetMapParamsWithContext creates a new GetMapParams object
+// with the ability to set a context for a request.
+func NewGetMapParamsWithContext(ctx context.Context) *GetMapParams {
+ return &GetMapParams{
+ Context: ctx,
+ }
+}
+
+// NewGetMapParamsWithHTTPClient creates a new GetMapParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetMapParamsWithHTTPClient(client *http.Client) *GetMapParams {
+ return &GetMapParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetMapParams contains all the parameters to send to the API endpoint
+
+ for the get map operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetMapParams struct {
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get map params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetMapParams) WithDefaults() *GetMapParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get map params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetMapParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get map params
+func (o *GetMapParams) WithTimeout(timeout time.Duration) *GetMapParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get map params
+func (o *GetMapParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get map params
+func (o *GetMapParams) WithContext(ctx context.Context) *GetMapParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get map params
+func (o *GetMapParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get map params
+func (o *GetMapParams) WithHTTPClient(client *http.Client) *GetMapParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get map params
+func (o *GetMapParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetMapParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_responses.go
new file mode 100644
index 000000000..dae2f2cf5
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_responses.go
@@ -0,0 +1,101 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package daemon
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// GetMapReader is a Reader for the GetMap structure.
+type GetMapReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetMapReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetMapOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetMapOK creates a GetMapOK with default headers values
+func NewGetMapOK() *GetMapOK {
+ return &GetMapOK{}
+}
+
+/*
+GetMapOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetMapOK struct {
+ Payload *models.BPFMapList
+}
+
+// IsSuccess returns true when this get map o k response has a 2xx status code
+func (o *GetMapOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get map o k response has a 3xx status code
+func (o *GetMapOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get map o k response has a 4xx status code
+func (o *GetMapOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get map o k response has a 5xx status code
+func (o *GetMapOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get map o k response a status code equal to that given
+func (o *GetMapOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetMapOK) Error() string {
+ return fmt.Sprintf("[GET /map][%d] getMapOK %+v", 200, o.Payload)
+}
+
+func (o *GetMapOK) String() string {
+ return fmt.Sprintf("[GET /map][%d] getMapOK %+v", 200, o.Payload)
+}
+
+func (o *GetMapOK) GetPayload() *models.BPFMapList {
+ return o.Payload
+}
+
+func (o *GetMapOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.BPFMapList)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_node_ids_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_node_ids_parameters.go
new file mode 100644
index 000000000..19fce1163
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_node_ids_parameters.go
@@ -0,0 +1,131 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package daemon
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetNodeIdsParams creates a new GetNodeIdsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetNodeIdsParams() *GetNodeIdsParams {
+ return &GetNodeIdsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetNodeIdsParamsWithTimeout creates a new GetNodeIdsParams object
+// with the ability to set a timeout on a request.
+func NewGetNodeIdsParamsWithTimeout(timeout time.Duration) *GetNodeIdsParams {
+ return &GetNodeIdsParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetNodeIdsParamsWithContext creates a new GetNodeIdsParams object
+// with the ability to set a context for a request.
+func NewGetNodeIdsParamsWithContext(ctx context.Context) *GetNodeIdsParams {
+ return &GetNodeIdsParams{
+ Context: ctx,
+ }
+}
+
+// NewGetNodeIdsParamsWithHTTPClient creates a new GetNodeIdsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetNodeIdsParamsWithHTTPClient(client *http.Client) *GetNodeIdsParams {
+ return &GetNodeIdsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetNodeIdsParams contains all the parameters to send to the API endpoint
+
+ for the get node ids operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetNodeIdsParams struct {
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get node ids params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetNodeIdsParams) WithDefaults() *GetNodeIdsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get node ids params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetNodeIdsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get node ids params
+func (o *GetNodeIdsParams) WithTimeout(timeout time.Duration) *GetNodeIdsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get node ids params
+func (o *GetNodeIdsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get node ids params
+func (o *GetNodeIdsParams) WithContext(ctx context.Context) *GetNodeIdsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get node ids params
+func (o *GetNodeIdsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get node ids params
+func (o *GetNodeIdsParams) WithHTTPClient(client *http.Client) *GetNodeIdsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get node ids params
+func (o *GetNodeIdsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetNodeIdsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_node_ids_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_node_ids_responses.go
new file mode 100644
index 000000000..1b4131485
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_node_ids_responses.go
@@ -0,0 +1,99 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package daemon
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// GetNodeIdsReader is a Reader for the GetNodeIds structure.
+type GetNodeIdsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetNodeIdsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetNodeIdsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetNodeIdsOK creates a GetNodeIdsOK with default headers values
+func NewGetNodeIdsOK() *GetNodeIdsOK {
+ return &GetNodeIdsOK{}
+}
+
+/*
+GetNodeIdsOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetNodeIdsOK struct {
+ Payload []*models.NodeID
+}
+
+// IsSuccess returns true when this get node ids o k response has a 2xx status code
+func (o *GetNodeIdsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get node ids o k response has a 3xx status code
+func (o *GetNodeIdsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get node ids o k response has a 4xx status code
+func (o *GetNodeIdsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get node ids o k response has a 5xx status code
+func (o *GetNodeIdsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get node ids o k response a status code equal to that given
+func (o *GetNodeIdsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetNodeIdsOK) Error() string {
+ return fmt.Sprintf("[GET /node/ids][%d] getNodeIdsOK %+v", 200, o.Payload)
+}
+
+func (o *GetNodeIdsOK) String() string {
+ return fmt.Sprintf("[GET /node/ids][%d] getNodeIdsOK %+v", 200, o.Payload)
+}
+
+func (o *GetNodeIdsOK) GetPayload() []*models.NodeID {
+ return o.Payload
+}
+
+func (o *GetNodeIdsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/patch_config_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/patch_config_parameters.go
new file mode 100644
index 000000000..a95be301c
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/daemon/patch_config_parameters.go
@@ -0,0 +1,153 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package daemon
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// NewPatchConfigParams creates a new PatchConfigParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewPatchConfigParams() *PatchConfigParams {
+ return &PatchConfigParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewPatchConfigParamsWithTimeout creates a new PatchConfigParams object
+// with the ability to set a timeout on a request.
+func NewPatchConfigParamsWithTimeout(timeout time.Duration) *PatchConfigParams {
+ return &PatchConfigParams{
+ timeout: timeout,
+ }
+}
+
+// NewPatchConfigParamsWithContext creates a new PatchConfigParams object
+// with the ability to set a context for a request.
+func NewPatchConfigParamsWithContext(ctx context.Context) *PatchConfigParams {
+ return &PatchConfigParams{
+ Context: ctx,
+ }
+}
+
+// NewPatchConfigParamsWithHTTPClient creates a new PatchConfigParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewPatchConfigParamsWithHTTPClient(client *http.Client) *PatchConfigParams {
+ return &PatchConfigParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+PatchConfigParams contains all the parameters to send to the API endpoint
+
+ for the patch config operation.
+
+ Typically these are written to a http.Request.
+*/
+type PatchConfigParams struct {
+
+ // Configuration.
+ Configuration *models.DaemonConfigurationSpec
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the patch config params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *PatchConfigParams) WithDefaults() *PatchConfigParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the patch config params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *PatchConfigParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the patch config params
+func (o *PatchConfigParams) WithTimeout(timeout time.Duration) *PatchConfigParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the patch config params
+func (o *PatchConfigParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the patch config params
+func (o *PatchConfigParams) WithContext(ctx context.Context) *PatchConfigParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the patch config params
+func (o *PatchConfigParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the patch config params
+func (o *PatchConfigParams) WithHTTPClient(client *http.Client) *PatchConfigParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the patch config params
+func (o *PatchConfigParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithConfiguration adds the configuration to the patch config params
+func (o *PatchConfigParams) WithConfiguration(configuration *models.DaemonConfigurationSpec) *PatchConfigParams {
+ o.SetConfiguration(configuration)
+ return o
+}
+
+// SetConfiguration adds the configuration to the patch config params
+func (o *PatchConfigParams) SetConfiguration(configuration *models.DaemonConfigurationSpec) {
+ o.Configuration = configuration
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *PatchConfigParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if o.Configuration != nil {
+ if err := r.SetBodyParam(o.Configuration); err != nil {
+ return err
+ }
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/patch_config_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/patch_config_responses.go
new file mode 100644
index 000000000..87397f675
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/daemon/patch_config_responses.go
@@ -0,0 +1,280 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package daemon
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// PatchConfigReader is a Reader for the PatchConfig structure.
+type PatchConfigReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *PatchConfigReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewPatchConfigOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewPatchConfigBadRequest()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 403:
+ result := NewPatchConfigForbidden()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 500:
+ result := NewPatchConfigFailure()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewPatchConfigOK creates a PatchConfigOK with default headers values
+func NewPatchConfigOK() *PatchConfigOK {
+ return &PatchConfigOK{}
+}
+
+/*
+PatchConfigOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type PatchConfigOK struct {
+}
+
+// IsSuccess returns true when this patch config o k response has a 2xx status code
+func (o *PatchConfigOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this patch config o k response has a 3xx status code
+func (o *PatchConfigOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this patch config o k response has a 4xx status code
+func (o *PatchConfigOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this patch config o k response has a 5xx status code
+func (o *PatchConfigOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this patch config o k response a status code equal to that given
+func (o *PatchConfigOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *PatchConfigOK) Error() string {
+ return fmt.Sprintf("[PATCH /config][%d] patchConfigOK ", 200)
+}
+
+func (o *PatchConfigOK) String() string {
+ return fmt.Sprintf("[PATCH /config][%d] patchConfigOK ", 200)
+}
+
+func (o *PatchConfigOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewPatchConfigBadRequest creates a PatchConfigBadRequest with default headers values
+func NewPatchConfigBadRequest() *PatchConfigBadRequest {
+ return &PatchConfigBadRequest{}
+}
+
+/*
+PatchConfigBadRequest describes a response with status code 400, with default header values.
+
+Bad configuration parameters
+*/
+type PatchConfigBadRequest struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this patch config bad request response has a 2xx status code
+func (o *PatchConfigBadRequest) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this patch config bad request response has a 3xx status code
+func (o *PatchConfigBadRequest) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this patch config bad request response has a 4xx status code
+func (o *PatchConfigBadRequest) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this patch config bad request response has a 5xx status code
+func (o *PatchConfigBadRequest) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this patch config bad request response a status code equal to that given
+func (o *PatchConfigBadRequest) IsCode(code int) bool {
+ return code == 400
+}
+
+func (o *PatchConfigBadRequest) Error() string {
+ return fmt.Sprintf("[PATCH /config][%d] patchConfigBadRequest %+v", 400, o.Payload)
+}
+
+func (o *PatchConfigBadRequest) String() string {
+ return fmt.Sprintf("[PATCH /config][%d] patchConfigBadRequest %+v", 400, o.Payload)
+}
+
+func (o *PatchConfigBadRequest) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *PatchConfigBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewPatchConfigForbidden creates a PatchConfigForbidden with default headers values
+func NewPatchConfigForbidden() *PatchConfigForbidden {
+ return &PatchConfigForbidden{}
+}
+
+/*
+PatchConfigForbidden describes a response with status code 403, with default header values.
+
+Forbidden
+*/
+type PatchConfigForbidden struct {
+}
+
+// IsSuccess returns true when this patch config forbidden response has a 2xx status code
+func (o *PatchConfigForbidden) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this patch config forbidden response has a 3xx status code
+func (o *PatchConfigForbidden) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this patch config forbidden response has a 4xx status code
+func (o *PatchConfigForbidden) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this patch config forbidden response has a 5xx status code
+func (o *PatchConfigForbidden) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this patch config forbidden response a status code equal to that given
+func (o *PatchConfigForbidden) IsCode(code int) bool {
+ return code == 403
+}
+
+func (o *PatchConfigForbidden) Error() string {
+ return fmt.Sprintf("[PATCH /config][%d] patchConfigForbidden ", 403)
+}
+
+func (o *PatchConfigForbidden) String() string {
+ return fmt.Sprintf("[PATCH /config][%d] patchConfigForbidden ", 403)
+}
+
+func (o *PatchConfigForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewPatchConfigFailure creates a PatchConfigFailure with default headers values
+func NewPatchConfigFailure() *PatchConfigFailure {
+ return &PatchConfigFailure{}
+}
+
+/*
+PatchConfigFailure describes a response with status code 500, with default header values.
+
+Recompilation failed
+*/
+type PatchConfigFailure struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this patch config failure response has a 2xx status code
+func (o *PatchConfigFailure) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this patch config failure response has a 3xx status code
+func (o *PatchConfigFailure) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this patch config failure response has a 4xx status code
+func (o *PatchConfigFailure) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this patch config failure response has a 5xx status code
+func (o *PatchConfigFailure) IsServerError() bool {
+ return true
+}
+
+// IsCode returns true when this patch config failure response a status code equal to that given
+func (o *PatchConfigFailure) IsCode(code int) bool {
+ return code == 500
+}
+
+func (o *PatchConfigFailure) Error() string {
+ return fmt.Sprintf("[PATCH /config][%d] patchConfigFailure %+v", 500, o.Payload)
+}
+
+func (o *PatchConfigFailure) String() string {
+ return fmt.Sprintf("[PATCH /config][%d] patchConfigFailure %+v", 500, o.Payload)
+}
+
+func (o *PatchConfigFailure) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *PatchConfigFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_id_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_id_parameters.go
new file mode 100644
index 000000000..28d2c6237
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_id_parameters.go
@@ -0,0 +1,168 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package endpoint
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewDeleteEndpointIDParams creates a new DeleteEndpointIDParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewDeleteEndpointIDParams() *DeleteEndpointIDParams {
+ return &DeleteEndpointIDParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewDeleteEndpointIDParamsWithTimeout creates a new DeleteEndpointIDParams object
+// with the ability to set a timeout on a request.
+func NewDeleteEndpointIDParamsWithTimeout(timeout time.Duration) *DeleteEndpointIDParams {
+ return &DeleteEndpointIDParams{
+ timeout: timeout,
+ }
+}
+
+// NewDeleteEndpointIDParamsWithContext creates a new DeleteEndpointIDParams object
+// with the ability to set a context for a request.
+func NewDeleteEndpointIDParamsWithContext(ctx context.Context) *DeleteEndpointIDParams {
+ return &DeleteEndpointIDParams{
+ Context: ctx,
+ }
+}
+
+// NewDeleteEndpointIDParamsWithHTTPClient creates a new DeleteEndpointIDParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewDeleteEndpointIDParamsWithHTTPClient(client *http.Client) *DeleteEndpointIDParams {
+ return &DeleteEndpointIDParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+DeleteEndpointIDParams contains all the parameters to send to the API endpoint
+
+ for the delete endpoint ID operation.
+
+ Typically these are written to a http.Request.
+*/
+type DeleteEndpointIDParams struct {
+
+ /* ID.
+
+ String describing an endpoint with the format ``[prefix:]id``. If no prefix
+ is specified, a prefix of ``cilium-local:`` is assumed. Not all endpoints
+ will be addressable by all endpoint ID prefixes with the exception of the
+ local Cilium UUID which is assigned to all endpoints.
+
+ Supported endpoint id prefixes:
+ - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595
+ - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343
+ - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0
+ - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique)
+ - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique)
+ - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique)
+ - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1
+ - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444
+
+ */
+ ID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the delete endpoint ID params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteEndpointIDParams) WithDefaults() *DeleteEndpointIDParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the delete endpoint ID params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteEndpointIDParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the delete endpoint ID params
+func (o *DeleteEndpointIDParams) WithTimeout(timeout time.Duration) *DeleteEndpointIDParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the delete endpoint ID params
+func (o *DeleteEndpointIDParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the delete endpoint ID params
+func (o *DeleteEndpointIDParams) WithContext(ctx context.Context) *DeleteEndpointIDParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the delete endpoint ID params
+func (o *DeleteEndpointIDParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the delete endpoint ID params
+func (o *DeleteEndpointIDParams) WithHTTPClient(client *http.Client) *DeleteEndpointIDParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the delete endpoint ID params
+func (o *DeleteEndpointIDParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithID adds the id to the delete endpoint ID params
+func (o *DeleteEndpointIDParams) WithID(id string) *DeleteEndpointIDParams {
+ o.SetID(id)
+ return o
+}
+
+// SetID adds the id to the delete endpoint ID params
+func (o *DeleteEndpointIDParams) SetID(id string) {
+ o.ID = id
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *DeleteEndpointIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param id
+ if err := r.SetPathParam("id", o.ID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_id_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_id_responses.go
new file mode 100644
index 000000000..b99123fec
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_id_responses.go
@@ -0,0 +1,396 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package endpoint
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// DeleteEndpointIDReader is a Reader for the DeleteEndpointID structure.
+type DeleteEndpointIDReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *DeleteEndpointIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewDeleteEndpointIDOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 206:
+ result := NewDeleteEndpointIDErrors()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewDeleteEndpointIDInvalid()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 403:
+ result := NewDeleteEndpointIDForbidden()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 404:
+ result := NewDeleteEndpointIDNotFound()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 429:
+ result := NewDeleteEndpointIDTooManyRequests()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewDeleteEndpointIDOK creates a DeleteEndpointIDOK with default headers values
+func NewDeleteEndpointIDOK() *DeleteEndpointIDOK {
+ return &DeleteEndpointIDOK{}
+}
+
+/*
+DeleteEndpointIDOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type DeleteEndpointIDOK struct {
+}
+
+// IsSuccess returns true when this delete endpoint Id o k response has a 2xx status code
+func (o *DeleteEndpointIDOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this delete endpoint Id o k response has a 3xx status code
+func (o *DeleteEndpointIDOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete endpoint Id o k response has a 4xx status code
+func (o *DeleteEndpointIDOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this delete endpoint Id o k response has a 5xx status code
+func (o *DeleteEndpointIDOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this delete endpoint Id o k response a status code equal to that given
+func (o *DeleteEndpointIDOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *DeleteEndpointIDOK) Error() string {
+ return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdOK ", 200)
+}
+
+func (o *DeleteEndpointIDOK) String() string {
+ return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdOK ", 200)
+}
+
+func (o *DeleteEndpointIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewDeleteEndpointIDErrors creates a DeleteEndpointIDErrors with default headers values
+func NewDeleteEndpointIDErrors() *DeleteEndpointIDErrors {
+ return &DeleteEndpointIDErrors{}
+}
+
+/*
+DeleteEndpointIDErrors describes a response with status code 206, with default header values.
+
+Deleted with a number of errors encountered
+*/
+type DeleteEndpointIDErrors struct {
+ Payload int64
+}
+
+// IsSuccess returns true when this delete endpoint Id errors response has a 2xx status code
+func (o *DeleteEndpointIDErrors) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this delete endpoint Id errors response has a 3xx status code
+func (o *DeleteEndpointIDErrors) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete endpoint Id errors response has a 4xx status code
+func (o *DeleteEndpointIDErrors) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this delete endpoint Id errors response has a 5xx status code
+func (o *DeleteEndpointIDErrors) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this delete endpoint Id errors response a status code equal to that given
+func (o *DeleteEndpointIDErrors) IsCode(code int) bool {
+ return code == 206
+}
+
+func (o *DeleteEndpointIDErrors) Error() string {
+ return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdErrors %+v", 206, o.Payload)
+}
+
+func (o *DeleteEndpointIDErrors) String() string {
+ return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdErrors %+v", 206, o.Payload)
+}
+
+func (o *DeleteEndpointIDErrors) GetPayload() int64 {
+ return o.Payload
+}
+
+func (o *DeleteEndpointIDErrors) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewDeleteEndpointIDInvalid creates a DeleteEndpointIDInvalid with default headers values
+func NewDeleteEndpointIDInvalid() *DeleteEndpointIDInvalid {
+ return &DeleteEndpointIDInvalid{}
+}
+
+/*
+ DeleteEndpointIDInvalid describes a response with status code 400, with default header values.
+
+ Invalid endpoint ID format for specified type. Details in error
+
+message
+*/
+type DeleteEndpointIDInvalid struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this delete endpoint Id invalid response has a 2xx status code
+func (o *DeleteEndpointIDInvalid) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this delete endpoint Id invalid response has a 3xx status code
+func (o *DeleteEndpointIDInvalid) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete endpoint Id invalid response has a 4xx status code
+func (o *DeleteEndpointIDInvalid) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this delete endpoint Id invalid response has a 5xx status code
+func (o *DeleteEndpointIDInvalid) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this delete endpoint Id invalid response a status code equal to that given
+func (o *DeleteEndpointIDInvalid) IsCode(code int) bool {
+ return code == 400
+}
+
+func (o *DeleteEndpointIDInvalid) Error() string {
+ return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdInvalid %+v", 400, o.Payload)
+}
+
+func (o *DeleteEndpointIDInvalid) String() string {
+ return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdInvalid %+v", 400, o.Payload)
+}
+
+func (o *DeleteEndpointIDInvalid) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *DeleteEndpointIDInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewDeleteEndpointIDForbidden creates a DeleteEndpointIDForbidden with default headers values
+func NewDeleteEndpointIDForbidden() *DeleteEndpointIDForbidden {
+ return &DeleteEndpointIDForbidden{}
+}
+
+/*
+DeleteEndpointIDForbidden describes a response with status code 403, with default header values.
+
+Forbidden
+*/
+type DeleteEndpointIDForbidden struct {
+}
+
+// IsSuccess returns true when this delete endpoint Id forbidden response has a 2xx status code
+func (o *DeleteEndpointIDForbidden) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this delete endpoint Id forbidden response has a 3xx status code
+func (o *DeleteEndpointIDForbidden) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete endpoint Id forbidden response has a 4xx status code
+func (o *DeleteEndpointIDForbidden) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this delete endpoint Id forbidden response has a 5xx status code
+func (o *DeleteEndpointIDForbidden) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this delete endpoint Id forbidden response a status code equal to that given
+func (o *DeleteEndpointIDForbidden) IsCode(code int) bool {
+ return code == 403
+}
+
+func (o *DeleteEndpointIDForbidden) Error() string {
+ return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdForbidden ", 403)
+}
+
+func (o *DeleteEndpointIDForbidden) String() string {
+ return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdForbidden ", 403)
+}
+
+func (o *DeleteEndpointIDForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewDeleteEndpointIDNotFound creates a DeleteEndpointIDNotFound with default headers values
+func NewDeleteEndpointIDNotFound() *DeleteEndpointIDNotFound {
+ return &DeleteEndpointIDNotFound{}
+}
+
+/*
+DeleteEndpointIDNotFound describes a response with status code 404, with default header values.
+
+Endpoint not found
+*/
+type DeleteEndpointIDNotFound struct {
+}
+
+// IsSuccess returns true when this delete endpoint Id not found response has a 2xx status code
+func (o *DeleteEndpointIDNotFound) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this delete endpoint Id not found response has a 3xx status code
+func (o *DeleteEndpointIDNotFound) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete endpoint Id not found response has a 4xx status code
+func (o *DeleteEndpointIDNotFound) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this delete endpoint Id not found response has a 5xx status code
+func (o *DeleteEndpointIDNotFound) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this delete endpoint Id not found response a status code equal to that given
+func (o *DeleteEndpointIDNotFound) IsCode(code int) bool {
+ return code == 404
+}
+
+func (o *DeleteEndpointIDNotFound) Error() string {
+ return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdNotFound ", 404)
+}
+
+func (o *DeleteEndpointIDNotFound) String() string {
+ return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdNotFound ", 404)
+}
+
+func (o *DeleteEndpointIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewDeleteEndpointIDTooManyRequests creates a DeleteEndpointIDTooManyRequests with default headers values
+func NewDeleteEndpointIDTooManyRequests() *DeleteEndpointIDTooManyRequests {
+ return &DeleteEndpointIDTooManyRequests{}
+}
+
+/*
+DeleteEndpointIDTooManyRequests describes a response with status code 429, with default header values.
+
+Rate-limiting too many requests in the given time frame
+*/
+type DeleteEndpointIDTooManyRequests struct {
+}
+
+// IsSuccess returns true when this delete endpoint Id too many requests response has a 2xx status code
+func (o *DeleteEndpointIDTooManyRequests) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this delete endpoint Id too many requests response has a 3xx status code
+func (o *DeleteEndpointIDTooManyRequests) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete endpoint Id too many requests response has a 4xx status code
+func (o *DeleteEndpointIDTooManyRequests) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this delete endpoint Id too many requests response has a 5xx status code
+func (o *DeleteEndpointIDTooManyRequests) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this delete endpoint Id too many requests response a status code equal to that given
+func (o *DeleteEndpointIDTooManyRequests) IsCode(code int) bool {
+ return code == 429
+}
+
+func (o *DeleteEndpointIDTooManyRequests) Error() string {
+ return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdTooManyRequests ", 429)
+}
+
+func (o *DeleteEndpointIDTooManyRequests) String() string {
+ return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdTooManyRequests ", 429)
+}
+
+func (o *DeleteEndpointIDTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_parameters.go
new file mode 100644
index 000000000..8d28629fe
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_parameters.go
@@ -0,0 +1,153 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package endpoint
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// NewDeleteEndpointParams creates a new DeleteEndpointParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewDeleteEndpointParams() *DeleteEndpointParams {
+ return &DeleteEndpointParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewDeleteEndpointParamsWithTimeout creates a new DeleteEndpointParams object
+// with the ability to set a timeout on a request.
+func NewDeleteEndpointParamsWithTimeout(timeout time.Duration) *DeleteEndpointParams {
+ return &DeleteEndpointParams{
+ timeout: timeout,
+ }
+}
+
+// NewDeleteEndpointParamsWithContext creates a new DeleteEndpointParams object
+// with the ability to set a context for a request.
+func NewDeleteEndpointParamsWithContext(ctx context.Context) *DeleteEndpointParams {
+ return &DeleteEndpointParams{
+ Context: ctx,
+ }
+}
+
+// NewDeleteEndpointParamsWithHTTPClient creates a new DeleteEndpointParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewDeleteEndpointParamsWithHTTPClient(client *http.Client) *DeleteEndpointParams {
+ return &DeleteEndpointParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+DeleteEndpointParams contains all the parameters to send to the API endpoint
+
+ for the delete endpoint operation.
+
+ Typically these are written to a http.Request.
+*/
+type DeleteEndpointParams struct {
+
+ // Endpoint.
+ Endpoint *models.EndpointBatchDeleteRequest
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the delete endpoint params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteEndpointParams) WithDefaults() *DeleteEndpointParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the delete endpoint params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteEndpointParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the delete endpoint params
+func (o *DeleteEndpointParams) WithTimeout(timeout time.Duration) *DeleteEndpointParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the delete endpoint params
+func (o *DeleteEndpointParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the delete endpoint params
+func (o *DeleteEndpointParams) WithContext(ctx context.Context) *DeleteEndpointParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the delete endpoint params
+func (o *DeleteEndpointParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the delete endpoint params
+func (o *DeleteEndpointParams) WithHTTPClient(client *http.Client) *DeleteEndpointParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the delete endpoint params
+func (o *DeleteEndpointParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithEndpoint adds the endpoint to the delete endpoint params
+func (o *DeleteEndpointParams) WithEndpoint(endpoint *models.EndpointBatchDeleteRequest) *DeleteEndpointParams {
+ o.SetEndpoint(endpoint)
+ return o
+}
+
+// SetEndpoint adds the endpoint to the delete endpoint params
+func (o *DeleteEndpointParams) SetEndpoint(endpoint *models.EndpointBatchDeleteRequest) {
+ o.Endpoint = endpoint
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *DeleteEndpointParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if o.Endpoint != nil {
+ if err := r.SetBodyParam(o.Endpoint); err != nil {
+ return err
+ }
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_responses.go
new file mode 100644
index 000000000..36842a86c
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_responses.go
@@ -0,0 +1,325 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package endpoint
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+)
+
+// DeleteEndpointReader is a Reader for the DeleteEndpoint structure.
+type DeleteEndpointReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *DeleteEndpointReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewDeleteEndpointOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 206:
+ result := NewDeleteEndpointErrors()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewDeleteEndpointInvalid()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 404:
+ result := NewDeleteEndpointNotFound()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 429:
+ result := NewDeleteEndpointTooManyRequests()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewDeleteEndpointOK creates a DeleteEndpointOK with default headers values
+func NewDeleteEndpointOK() *DeleteEndpointOK {
+ return &DeleteEndpointOK{}
+}
+
+/*
+DeleteEndpointOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type DeleteEndpointOK struct {
+}
+
+// IsSuccess returns true when this delete endpoint o k response has a 2xx status code
+func (o *DeleteEndpointOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this delete endpoint o k response has a 3xx status code
+func (o *DeleteEndpointOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete endpoint o k response has a 4xx status code
+func (o *DeleteEndpointOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this delete endpoint o k response has a 5xx status code
+func (o *DeleteEndpointOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this delete endpoint o k response a status code equal to that given
+func (o *DeleteEndpointOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *DeleteEndpointOK) Error() string {
+ return fmt.Sprintf("[DELETE /endpoint][%d] deleteEndpointOK ", 200)
+}
+
+func (o *DeleteEndpointOK) String() string {
+ return fmt.Sprintf("[DELETE /endpoint][%d] deleteEndpointOK ", 200)
+}
+
+func (o *DeleteEndpointOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewDeleteEndpointErrors creates a DeleteEndpointErrors with default headers values
+func NewDeleteEndpointErrors() *DeleteEndpointErrors {
+ return &DeleteEndpointErrors{}
+}
+
+/*
+DeleteEndpointErrors describes a response with status code 206, with default header values.
+
+Deleted with a number of errors encountered
+*/
+type DeleteEndpointErrors struct {
+ Payload int64
+}
+
+// IsSuccess returns true when this delete endpoint errors response has a 2xx status code
+func (o *DeleteEndpointErrors) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this delete endpoint errors response has a 3xx status code
+func (o *DeleteEndpointErrors) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete endpoint errors response has a 4xx status code
+func (o *DeleteEndpointErrors) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this delete endpoint errors response has a 5xx status code
+func (o *DeleteEndpointErrors) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this delete endpoint errors response a status code equal to that given
+func (o *DeleteEndpointErrors) IsCode(code int) bool {
+ return code == 206
+}
+
+func (o *DeleteEndpointErrors) Error() string {
+ return fmt.Sprintf("[DELETE /endpoint][%d] deleteEndpointErrors %+v", 206, o.Payload)
+}
+
+func (o *DeleteEndpointErrors) String() string {
+ return fmt.Sprintf("[DELETE /endpoint][%d] deleteEndpointErrors %+v", 206, o.Payload)
+}
+
+func (o *DeleteEndpointErrors) GetPayload() int64 {
+ return o.Payload
+}
+
+func (o *DeleteEndpointErrors) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewDeleteEndpointInvalid creates a DeleteEndpointInvalid with default headers values
+func NewDeleteEndpointInvalid() *DeleteEndpointInvalid {
+ return &DeleteEndpointInvalid{}
+}
+
+/*
+DeleteEndpointInvalid describes a response with status code 400, with default header values.
+
+Invalid endpoint delete request
+*/
+type DeleteEndpointInvalid struct {
+}
+
+// IsSuccess returns true when this delete endpoint invalid response has a 2xx status code
+func (o *DeleteEndpointInvalid) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this delete endpoint invalid response has a 3xx status code
+func (o *DeleteEndpointInvalid) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete endpoint invalid response has a 4xx status code
+func (o *DeleteEndpointInvalid) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this delete endpoint invalid response has a 5xx status code
+func (o *DeleteEndpointInvalid) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this delete endpoint invalid response a status code equal to that given
+func (o *DeleteEndpointInvalid) IsCode(code int) bool {
+ return code == 400
+}
+
+func (o *DeleteEndpointInvalid) Error() string {
+ return fmt.Sprintf("[DELETE /endpoint][%d] deleteEndpointInvalid ", 400)
+}
+
+func (o *DeleteEndpointInvalid) String() string {
+ return fmt.Sprintf("[DELETE /endpoint][%d] deleteEndpointInvalid ", 400)
+}
+
+func (o *DeleteEndpointInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewDeleteEndpointNotFound creates a DeleteEndpointNotFound with default headers values
+func NewDeleteEndpointNotFound() *DeleteEndpointNotFound {
+ return &DeleteEndpointNotFound{}
+}
+
+/*
+DeleteEndpointNotFound describes a response with status code 404, with default header values.
+
+No endpoints with provided parameters found
+*/
+type DeleteEndpointNotFound struct {
+}
+
+// IsSuccess returns true when this delete endpoint not found response has a 2xx status code
+func (o *DeleteEndpointNotFound) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this delete endpoint not found response has a 3xx status code
+func (o *DeleteEndpointNotFound) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete endpoint not found response has a 4xx status code
+func (o *DeleteEndpointNotFound) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this delete endpoint not found response has a 5xx status code
+func (o *DeleteEndpointNotFound) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this delete endpoint not found response a status code equal to that given
+func (o *DeleteEndpointNotFound) IsCode(code int) bool {
+ return code == 404
+}
+
+func (o *DeleteEndpointNotFound) Error() string {
+ return fmt.Sprintf("[DELETE /endpoint][%d] deleteEndpointNotFound ", 404)
+}
+
+func (o *DeleteEndpointNotFound) String() string {
+ return fmt.Sprintf("[DELETE /endpoint][%d] deleteEndpointNotFound ", 404)
+}
+
+func (o *DeleteEndpointNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewDeleteEndpointTooManyRequests creates a DeleteEndpointTooManyRequests with default headers values
+func NewDeleteEndpointTooManyRequests() *DeleteEndpointTooManyRequests {
+ return &DeleteEndpointTooManyRequests{}
+}
+
+/*
+DeleteEndpointTooManyRequests describes a response with status code 429, with default header values.
+
+Rate-limiting too many requests in the given time frame
+*/
+type DeleteEndpointTooManyRequests struct {
+}
+
+// IsSuccess returns true when this delete endpoint too many requests response has a 2xx status code
+func (o *DeleteEndpointTooManyRequests) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this delete endpoint too many requests response has a 3xx status code
+func (o *DeleteEndpointTooManyRequests) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete endpoint too many requests response has a 4xx status code
+func (o *DeleteEndpointTooManyRequests) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this delete endpoint too many requests response has a 5xx status code
+func (o *DeleteEndpointTooManyRequests) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this delete endpoint too many requests response a status code equal to that given
+func (o *DeleteEndpointTooManyRequests) IsCode(code int) bool {
+ return code == 429
+}
+
+func (o *DeleteEndpointTooManyRequests) Error() string {
+ return fmt.Sprintf("[DELETE /endpoint][%d] deleteEndpointTooManyRequests ", 429)
+}
+
+func (o *DeleteEndpointTooManyRequests) String() string {
+ return fmt.Sprintf("[DELETE /endpoint][%d] deleteEndpointTooManyRequests ", 429)
+}
+
+func (o *DeleteEndpointTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/endpoint_client.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/endpoint_client.go
new file mode 100644
index 000000000..72f7b68ea
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/endpoint_client.go
@@ -0,0 +1,555 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package endpoint
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+)
+
+// New creates a new endpoint API client.
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
+ return &Client{transport: transport, formats: formats}
+}
+
+/*
+Client for endpoint API
+*/
+type Client struct {
+ transport runtime.ClientTransport
+ formats strfmt.Registry
+}
+
+// ClientOption is the option for Client methods
+type ClientOption func(*runtime.ClientOperation)
+
+// ClientService is the interface for Client methods
+type ClientService interface {
+ DeleteEndpoint(params *DeleteEndpointParams, opts ...ClientOption) (*DeleteEndpointOK, *DeleteEndpointErrors, error)
+
+ DeleteEndpointID(params *DeleteEndpointIDParams, opts ...ClientOption) (*DeleteEndpointIDOK, *DeleteEndpointIDErrors, error)
+
+ GetEndpoint(params *GetEndpointParams, opts ...ClientOption) (*GetEndpointOK, error)
+
+ GetEndpointID(params *GetEndpointIDParams, opts ...ClientOption) (*GetEndpointIDOK, error)
+
+ GetEndpointIDConfig(params *GetEndpointIDConfigParams, opts ...ClientOption) (*GetEndpointIDConfigOK, error)
+
+ GetEndpointIDHealthz(params *GetEndpointIDHealthzParams, opts ...ClientOption) (*GetEndpointIDHealthzOK, error)
+
+ GetEndpointIDLabels(params *GetEndpointIDLabelsParams, opts ...ClientOption) (*GetEndpointIDLabelsOK, error)
+
+ GetEndpointIDLog(params *GetEndpointIDLogParams, opts ...ClientOption) (*GetEndpointIDLogOK, error)
+
+ PatchEndpointID(params *PatchEndpointIDParams, opts ...ClientOption) (*PatchEndpointIDOK, error)
+
+ PatchEndpointIDConfig(params *PatchEndpointIDConfigParams, opts ...ClientOption) (*PatchEndpointIDConfigOK, error)
+
+ PatchEndpointIDLabels(params *PatchEndpointIDLabelsParams, opts ...ClientOption) (*PatchEndpointIDLabelsOK, error)
+
+ PutEndpointID(params *PutEndpointIDParams, opts ...ClientOption) (*PutEndpointIDCreated, error)
+
+ SetTransport(transport runtime.ClientTransport)
+}
+
+/*
+DeleteEndpoint deletes a list of endpoints
+
+Deletes a list of endpoints that have endpoints matching the provided properties
+*/
+func (a *Client) DeleteEndpoint(params *DeleteEndpointParams, opts ...ClientOption) (*DeleteEndpointOK, *DeleteEndpointErrors, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewDeleteEndpointParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "DeleteEndpoint",
+ Method: "DELETE",
+ PathPattern: "/endpoint",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &DeleteEndpointReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, nil, err
+ }
+ switch value := result.(type) {
+ case *DeleteEndpointOK:
+ return value, nil, nil
+ case *DeleteEndpointErrors:
+ return nil, value, nil
+ }
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for endpoint: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+ DeleteEndpointID deletes endpoint
+
+ Deletes the endpoint specified by the ID. Deletion is imminent and
+
+atomic, if the deletion request is valid and the endpoint exists,
+deletion will occur even if errors are encountered in the process. If
+errors have been encountered, the code 202 will be returned, otherwise
+200 on success.
+
+All resources associated with the endpoint will be freed and the
+workload represented by the endpoint will be disconnected.It will no
+longer be able to initiate or receive communications of any sort.
+*/
+func (a *Client) DeleteEndpointID(params *DeleteEndpointIDParams, opts ...ClientOption) (*DeleteEndpointIDOK, *DeleteEndpointIDErrors, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewDeleteEndpointIDParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "DeleteEndpointID",
+ Method: "DELETE",
+ PathPattern: "/endpoint/{id}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &DeleteEndpointIDReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, nil, err
+ }
+ switch value := result.(type) {
+ case *DeleteEndpointIDOK:
+ return value, nil, nil
+ case *DeleteEndpointIDErrors:
+ return nil, value, nil
+ }
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for endpoint: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+GetEndpoint retrieves a list of endpoints that have metadata matching the provided parameters
+
+Retrieves a list of endpoints that have metadata matching the provided parameters, or all endpoints if no parameters provided.
+*/
+func (a *Client) GetEndpoint(params *GetEndpointParams, opts ...ClientOption) (*GetEndpointOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetEndpointParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetEndpoint",
+ Method: "GET",
+ PathPattern: "/endpoint",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetEndpointReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetEndpointOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetEndpoint: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+GetEndpointID gets endpoint by endpoint ID
+
+Returns endpoint information
+*/
+func (a *Client) GetEndpointID(params *GetEndpointIDParams, opts ...ClientOption) (*GetEndpointIDOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetEndpointIDParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetEndpointID",
+ Method: "GET",
+ PathPattern: "/endpoint/{id}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetEndpointIDReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetEndpointIDOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetEndpointID: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+GetEndpointIDConfig retrieves endpoint configuration
+
+Retrieves the configuration of the specified endpoint.
+*/
+func (a *Client) GetEndpointIDConfig(params *GetEndpointIDConfigParams, opts ...ClientOption) (*GetEndpointIDConfigOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetEndpointIDConfigParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetEndpointIDConfig",
+ Method: "GET",
+ PathPattern: "/endpoint/{id}/config",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetEndpointIDConfigReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetEndpointIDConfigOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetEndpointIDConfig: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+GetEndpointIDHealthz retrieves the status logs associated with this endpoint
+*/
+func (a *Client) GetEndpointIDHealthz(params *GetEndpointIDHealthzParams, opts ...ClientOption) (*GetEndpointIDHealthzOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetEndpointIDHealthzParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetEndpointIDHealthz",
+ Method: "GET",
+ PathPattern: "/endpoint/{id}/healthz",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetEndpointIDHealthzReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetEndpointIDHealthzOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetEndpointIDHealthz: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+GetEndpointIDLabels retrieves the list of labels associated with an endpoint
+*/
+func (a *Client) GetEndpointIDLabels(params *GetEndpointIDLabelsParams, opts ...ClientOption) (*GetEndpointIDLabelsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetEndpointIDLabelsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetEndpointIDLabels",
+ Method: "GET",
+ PathPattern: "/endpoint/{id}/labels",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetEndpointIDLabelsReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetEndpointIDLabelsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetEndpointIDLabels: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+GetEndpointIDLog retrieves the status logs associated with this endpoint
+*/
+func (a *Client) GetEndpointIDLog(params *GetEndpointIDLogParams, opts ...ClientOption) (*GetEndpointIDLogOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetEndpointIDLogParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetEndpointIDLog",
+ Method: "GET",
+ PathPattern: "/endpoint/{id}/log",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetEndpointIDLogReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetEndpointIDLogOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetEndpointIDLog: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+PatchEndpointID modifies existing endpoint
+
+Applies the endpoint change request to an existing endpoint
+*/
+func (a *Client) PatchEndpointID(params *PatchEndpointIDParams, opts ...ClientOption) (*PatchEndpointIDOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewPatchEndpointIDParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "PatchEndpointID",
+ Method: "PATCH",
+ PathPattern: "/endpoint/{id}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &PatchEndpointIDReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*PatchEndpointIDOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for PatchEndpointID: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+ PatchEndpointIDConfig modifies mutable endpoint configuration
+
+ Update the configuration of an existing endpoint and regenerates &
+
+recompiles the corresponding programs automatically.
+*/
+func (a *Client) PatchEndpointIDConfig(params *PatchEndpointIDConfigParams, opts ...ClientOption) (*PatchEndpointIDConfigOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewPatchEndpointIDConfigParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "PatchEndpointIDConfig",
+ Method: "PATCH",
+ PathPattern: "/endpoint/{id}/config",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &PatchEndpointIDConfigReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*PatchEndpointIDConfigOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for PatchEndpointIDConfig: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+ PatchEndpointIDLabels sets label configuration of endpoint
+
+ Sets labels associated with an endpoint. These can be user provided or
+
+derived from the orchestration system.
+*/
+func (a *Client) PatchEndpointIDLabels(params *PatchEndpointIDLabelsParams, opts ...ClientOption) (*PatchEndpointIDLabelsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewPatchEndpointIDLabelsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "PatchEndpointIDLabels",
+ Method: "PATCH",
+ PathPattern: "/endpoint/{id}/labels",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &PatchEndpointIDLabelsReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*PatchEndpointIDLabelsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for PatchEndpointIDLabels: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+PutEndpointID creates endpoint
+
+Creates a new endpoint
+*/
+func (a *Client) PutEndpointID(params *PutEndpointIDParams, opts ...ClientOption) (*PutEndpointIDCreated, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewPutEndpointIDParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "PutEndpointID",
+ Method: "PUT",
+ PathPattern: "/endpoint/{id}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &PutEndpointIDReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*PutEndpointIDCreated)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for PutEndpointID: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+ a.transport = transport
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_config_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_config_parameters.go
new file mode 100644
index 000000000..971bf69f0
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_config_parameters.go
@@ -0,0 +1,168 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package endpoint
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetEndpointIDConfigParams creates a new GetEndpointIDConfigParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetEndpointIDConfigParams() *GetEndpointIDConfigParams {
+ return &GetEndpointIDConfigParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetEndpointIDConfigParamsWithTimeout creates a new GetEndpointIDConfigParams object
+// with the ability to set a timeout on a request.
+func NewGetEndpointIDConfigParamsWithTimeout(timeout time.Duration) *GetEndpointIDConfigParams {
+ return &GetEndpointIDConfigParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetEndpointIDConfigParamsWithContext creates a new GetEndpointIDConfigParams object
+// with the ability to set a context for a request.
+func NewGetEndpointIDConfigParamsWithContext(ctx context.Context) *GetEndpointIDConfigParams {
+ return &GetEndpointIDConfigParams{
+ Context: ctx,
+ }
+}
+
+// NewGetEndpointIDConfigParamsWithHTTPClient creates a new GetEndpointIDConfigParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetEndpointIDConfigParamsWithHTTPClient(client *http.Client) *GetEndpointIDConfigParams {
+ return &GetEndpointIDConfigParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetEndpointIDConfigParams contains all the parameters to send to the API endpoint
+
+ for the get endpoint ID config operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetEndpointIDConfigParams struct {
+
+ /* ID.
+
+ String describing an endpoint with the format ``[prefix:]id``. If no prefix
+ is specified, a prefix of ``cilium-local:`` is assumed. Not all endpoints
+ will be addressable by all endpoint ID prefixes with the exception of the
+ local Cilium UUID which is assigned to all endpoints.
+
+ Supported endpoint id prefixes:
+ - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595
+ - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343
+ - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0
+ - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique)
+ - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique)
+ - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique)
+ - cep-name: cep name for this container if K8s is enabled, e.g. cep-name:default:foobar-net1
+ - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444
+
+ */
+ ID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get endpoint ID config params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetEndpointIDConfigParams) WithDefaults() *GetEndpointIDConfigParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get endpoint ID config params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetEndpointIDConfigParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get endpoint ID config params
+func (o *GetEndpointIDConfigParams) WithTimeout(timeout time.Duration) *GetEndpointIDConfigParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get endpoint ID config params
+func (o *GetEndpointIDConfigParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get endpoint ID config params
+func (o *GetEndpointIDConfigParams) WithContext(ctx context.Context) *GetEndpointIDConfigParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get endpoint ID config params
+func (o *GetEndpointIDConfigParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get endpoint ID config params
+func (o *GetEndpointIDConfigParams) WithHTTPClient(client *http.Client) *GetEndpointIDConfigParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get endpoint ID config params
+func (o *GetEndpointIDConfigParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithID adds the id to the get endpoint ID config params
+func (o *GetEndpointIDConfigParams) WithID(id string) *GetEndpointIDConfigParams {
+ o.SetID(id)
+ return o
+}
+
+// SetID adds the id to the get endpoint ID config params
+func (o *GetEndpointIDConfigParams) SetID(id string) {
+ o.ID = id
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetEndpointIDConfigParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param id
+ if err := r.SetPathParam("id", o.ID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_config_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_config_responses.go
new file mode 100644
index 000000000..e16cdfbf5
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_config_responses.go
@@ -0,0 +1,215 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package endpoint
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// GetEndpointIDConfigReader is a Reader for the GetEndpointIDConfig structure.
+type GetEndpointIDConfigReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetEndpointIDConfigReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetEndpointIDConfigOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 404:
+ result := NewGetEndpointIDConfigNotFound()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 429:
+ result := NewGetEndpointIDConfigTooManyRequests()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetEndpointIDConfigOK creates a GetEndpointIDConfigOK with default headers values
+func NewGetEndpointIDConfigOK() *GetEndpointIDConfigOK {
+ return &GetEndpointIDConfigOK{}
+}
+
+/*
+GetEndpointIDConfigOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetEndpointIDConfigOK struct {
+ Payload *models.EndpointConfigurationStatus
+}
+
+// IsSuccess returns true when this get endpoint Id config o k response has a 2xx status code
+func (o *GetEndpointIDConfigOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get endpoint Id config o k response has a 3xx status code
+func (o *GetEndpointIDConfigOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get endpoint Id config o k response has a 4xx status code
+func (o *GetEndpointIDConfigOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get endpoint Id config o k response has a 5xx status code
+func (o *GetEndpointIDConfigOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get endpoint Id config o k response has a status code equal to that given
+func (o *GetEndpointIDConfigOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetEndpointIDConfigOK) Error() string {
+ return fmt.Sprintf("[GET /endpoint/{id}/config][%d] getEndpointIdConfigOK %+v", 200, o.Payload)
+}
+
+func (o *GetEndpointIDConfigOK) String() string {
+ return fmt.Sprintf("[GET /endpoint/{id}/config][%d] getEndpointIdConfigOK %+v", 200, o.Payload)
+}
+
+func (o *GetEndpointIDConfigOK) GetPayload() *models.EndpointConfigurationStatus {
+ return o.Payload
+}
+
+func (o *GetEndpointIDConfigOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.EndpointConfigurationStatus)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetEndpointIDConfigNotFound creates a GetEndpointIDConfigNotFound with default headers values
+func NewGetEndpointIDConfigNotFound() *GetEndpointIDConfigNotFound {
+ return &GetEndpointIDConfigNotFound{}
+}
+
+/*
+GetEndpointIDConfigNotFound describes a response with status code 404, with default header values.
+
+Endpoint not found
+*/
+type GetEndpointIDConfigNotFound struct {
+}
+
+// IsSuccess returns true when this get endpoint Id config not found response has a 2xx status code
+func (o *GetEndpointIDConfigNotFound) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get endpoint Id config not found response has a 3xx status code
+func (o *GetEndpointIDConfigNotFound) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get endpoint Id config not found response has a 4xx status code
+func (o *GetEndpointIDConfigNotFound) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get endpoint Id config not found response has a 5xx status code
+func (o *GetEndpointIDConfigNotFound) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get endpoint Id config not found response has a status code equal to that given
+func (o *GetEndpointIDConfigNotFound) IsCode(code int) bool {
+ return code == 404
+}
+
+func (o *GetEndpointIDConfigNotFound) Error() string {
+ return fmt.Sprintf("[GET /endpoint/{id}/config][%d] getEndpointIdConfigNotFound ", 404)
+}
+
+func (o *GetEndpointIDConfigNotFound) String() string {
+ return fmt.Sprintf("[GET /endpoint/{id}/config][%d] getEndpointIdConfigNotFound ", 404)
+}
+
+func (o *GetEndpointIDConfigNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewGetEndpointIDConfigTooManyRequests creates a GetEndpointIDConfigTooManyRequests with default headers values
+func NewGetEndpointIDConfigTooManyRequests() *GetEndpointIDConfigTooManyRequests {
+ return &GetEndpointIDConfigTooManyRequests{}
+}
+
+/*
+GetEndpointIDConfigTooManyRequests describes a response with status code 429, with default header values.
+
+Rate-limiting too many requests in the given time frame
+*/
+type GetEndpointIDConfigTooManyRequests struct {
+}
+
+// IsSuccess returns true when this get endpoint Id config too many requests response has a 2xx status code
+func (o *GetEndpointIDConfigTooManyRequests) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get endpoint Id config too many requests response has a 3xx status code
+func (o *GetEndpointIDConfigTooManyRequests) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get endpoint Id config too many requests response has a 4xx status code
+func (o *GetEndpointIDConfigTooManyRequests) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get endpoint Id config too many requests response has a 5xx status code
+func (o *GetEndpointIDConfigTooManyRequests) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get endpoint Id config too many requests response has a status code equal to that given
+func (o *GetEndpointIDConfigTooManyRequests) IsCode(code int) bool {
+ return code == 429
+}
+
+func (o *GetEndpointIDConfigTooManyRequests) Error() string {
+ return fmt.Sprintf("[GET /endpoint/{id}/config][%d] getEndpointIdConfigTooManyRequests ", 429)
+}
+
+func (o *GetEndpointIDConfigTooManyRequests) String() string {
+ return fmt.Sprintf("[GET /endpoint/{id}/config][%d] getEndpointIdConfigTooManyRequests ", 429)
+}
+
+func (o *GetEndpointIDConfigTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_healthz_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_healthz_parameters.go
new file mode 100644
index 000000000..b211692ce
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_healthz_parameters.go
@@ -0,0 +1,168 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package endpoint
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetEndpointIDHealthzParams creates a new GetEndpointIDHealthzParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetEndpointIDHealthzParams() *GetEndpointIDHealthzParams {
+ return &GetEndpointIDHealthzParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetEndpointIDHealthzParamsWithTimeout creates a new GetEndpointIDHealthzParams object
+// with the ability to set a timeout on a request.
+func NewGetEndpointIDHealthzParamsWithTimeout(timeout time.Duration) *GetEndpointIDHealthzParams {
+ return &GetEndpointIDHealthzParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetEndpointIDHealthzParamsWithContext creates a new GetEndpointIDHealthzParams object
+// with the ability to set a context for a request.
+func NewGetEndpointIDHealthzParamsWithContext(ctx context.Context) *GetEndpointIDHealthzParams {
+ return &GetEndpointIDHealthzParams{
+ Context: ctx,
+ }
+}
+
+// NewGetEndpointIDHealthzParamsWithHTTPClient creates a new GetEndpointIDHealthzParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetEndpointIDHealthzParamsWithHTTPClient(client *http.Client) *GetEndpointIDHealthzParams {
+ return &GetEndpointIDHealthzParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetEndpointIDHealthzParams contains all the parameters to send to the API endpoint
+
+ for the get endpoint ID healthz operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetEndpointIDHealthzParams struct {
+
+ /* ID.
+
+ String describing an endpoint with the format ``[prefix:]id``. If no prefix
+ is specified, a prefix of ``cilium-local:`` is assumed. Not all endpoints
+ will be addressable by all endpoint ID prefixes with the exception of the
+ local Cilium UUID which is assigned to all endpoints.
+
+ Supported endpoint id prefixes:
+ - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595
+ - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343
+ - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0
+ - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique)
+ - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique)
+ - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique)
+ - cep-name: cep name for this container if K8s is enabled, e.g. cep-name:default:foobar-net1
+ - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444
+
+ */
+ ID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get endpoint ID healthz params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetEndpointIDHealthzParams) WithDefaults() *GetEndpointIDHealthzParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get endpoint ID healthz params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetEndpointIDHealthzParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get endpoint ID healthz params
+func (o *GetEndpointIDHealthzParams) WithTimeout(timeout time.Duration) *GetEndpointIDHealthzParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get endpoint ID healthz params
+func (o *GetEndpointIDHealthzParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get endpoint ID healthz params
+func (o *GetEndpointIDHealthzParams) WithContext(ctx context.Context) *GetEndpointIDHealthzParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get endpoint ID healthz params
+func (o *GetEndpointIDHealthzParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get endpoint ID healthz params
+func (o *GetEndpointIDHealthzParams) WithHTTPClient(client *http.Client) *GetEndpointIDHealthzParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get endpoint ID healthz params
+func (o *GetEndpointIDHealthzParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithID adds the id to the get endpoint ID healthz params
+func (o *GetEndpointIDHealthzParams) WithID(id string) *GetEndpointIDHealthzParams {
+ o.SetID(id)
+ return o
+}
+
+// SetID adds the id to the get endpoint ID healthz params
+func (o *GetEndpointIDHealthzParams) SetID(id string) {
+ o.ID = id
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetEndpointIDHealthzParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param id
+ if err := r.SetPathParam("id", o.ID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_healthz_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_healthz_responses.go
new file mode 100644
index 000000000..a7e976534
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_healthz_responses.go
@@ -0,0 +1,272 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package endpoint
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// GetEndpointIDHealthzReader is a Reader for the GetEndpointIDHealthz structure.
+type GetEndpointIDHealthzReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetEndpointIDHealthzReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetEndpointIDHealthzOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewGetEndpointIDHealthzInvalid()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 404:
+ result := NewGetEndpointIDHealthzNotFound()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 429:
+ result := NewGetEndpointIDHealthzTooManyRequests()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetEndpointIDHealthzOK creates a GetEndpointIDHealthzOK with default headers values
+func NewGetEndpointIDHealthzOK() *GetEndpointIDHealthzOK {
+ return &GetEndpointIDHealthzOK{}
+}
+
+/*
+GetEndpointIDHealthzOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetEndpointIDHealthzOK struct {
+ Payload *models.EndpointHealth
+}
+
+// IsSuccess returns true when this get endpoint Id healthz o k response has a 2xx status code
+func (o *GetEndpointIDHealthzOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get endpoint Id healthz o k response has a 3xx status code
+func (o *GetEndpointIDHealthzOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get endpoint Id healthz o k response has a 4xx status code
+func (o *GetEndpointIDHealthzOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get endpoint Id healthz o k response has a 5xx status code
+func (o *GetEndpointIDHealthzOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get endpoint Id healthz o k response has a status code equal to that given
+func (o *GetEndpointIDHealthzOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetEndpointIDHealthzOK) Error() string {
+ return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzOK %+v", 200, o.Payload)
+}
+
+func (o *GetEndpointIDHealthzOK) String() string {
+ return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzOK %+v", 200, o.Payload)
+}
+
+func (o *GetEndpointIDHealthzOK) GetPayload() *models.EndpointHealth {
+ return o.Payload
+}
+
+func (o *GetEndpointIDHealthzOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.EndpointHealth)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetEndpointIDHealthzInvalid creates a GetEndpointIDHealthzInvalid with default headers values
+func NewGetEndpointIDHealthzInvalid() *GetEndpointIDHealthzInvalid {
+ return &GetEndpointIDHealthzInvalid{}
+}
+
+/*
+GetEndpointIDHealthzInvalid describes a response with status code 400, with default header values.
+
+Invalid identity provided
+*/
+type GetEndpointIDHealthzInvalid struct {
+}
+
+// IsSuccess returns true when this get endpoint Id healthz invalid response has a 2xx status code
+func (o *GetEndpointIDHealthzInvalid) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get endpoint Id healthz invalid response has a 3xx status code
+func (o *GetEndpointIDHealthzInvalid) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get endpoint Id healthz invalid response has a 4xx status code
+func (o *GetEndpointIDHealthzInvalid) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get endpoint Id healthz invalid response has a 5xx status code
+func (o *GetEndpointIDHealthzInvalid) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get endpoint Id healthz invalid response has a status code equal to that given
+func (o *GetEndpointIDHealthzInvalid) IsCode(code int) bool {
+ return code == 400
+}
+
+func (o *GetEndpointIDHealthzInvalid) Error() string {
+ return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzInvalid ", 400)
+}
+
+func (o *GetEndpointIDHealthzInvalid) String() string {
+ return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzInvalid ", 400)
+}
+
+func (o *GetEndpointIDHealthzInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewGetEndpointIDHealthzNotFound creates a GetEndpointIDHealthzNotFound with default headers values
+func NewGetEndpointIDHealthzNotFound() *GetEndpointIDHealthzNotFound {
+ return &GetEndpointIDHealthzNotFound{}
+}
+
+/*
+GetEndpointIDHealthzNotFound describes a response with status code 404, with default header values.
+
+Endpoint not found
+*/
+type GetEndpointIDHealthzNotFound struct {
+}
+
+// IsSuccess returns true when this get endpoint Id healthz not found response has a 2xx status code
+func (o *GetEndpointIDHealthzNotFound) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get endpoint Id healthz not found response has a 3xx status code
+func (o *GetEndpointIDHealthzNotFound) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get endpoint Id healthz not found response has a 4xx status code
+func (o *GetEndpointIDHealthzNotFound) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get endpoint Id healthz not found response has a 5xx status code
+func (o *GetEndpointIDHealthzNotFound) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get endpoint Id healthz not found response has a status code equal to that given
+func (o *GetEndpointIDHealthzNotFound) IsCode(code int) bool {
+ return code == 404
+}
+
+func (o *GetEndpointIDHealthzNotFound) Error() string {
+ return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzNotFound ", 404)
+}
+
+func (o *GetEndpointIDHealthzNotFound) String() string {
+ return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzNotFound ", 404)
+}
+
+func (o *GetEndpointIDHealthzNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewGetEndpointIDHealthzTooManyRequests creates a GetEndpointIDHealthzTooManyRequests with default headers values
+func NewGetEndpointIDHealthzTooManyRequests() *GetEndpointIDHealthzTooManyRequests {
+ return &GetEndpointIDHealthzTooManyRequests{}
+}
+
+/*
+GetEndpointIDHealthzTooManyRequests describes a response with status code 429, with default header values.
+
+Rate-limiting too many requests in the given time frame
+*/
+type GetEndpointIDHealthzTooManyRequests struct {
+}
+
+// IsSuccess returns true when this get endpoint Id healthz too many requests response has a 2xx status code
+func (o *GetEndpointIDHealthzTooManyRequests) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get endpoint Id healthz too many requests response has a 3xx status code
+func (o *GetEndpointIDHealthzTooManyRequests) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get endpoint Id healthz too many requests response has a 4xx status code
+func (o *GetEndpointIDHealthzTooManyRequests) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get endpoint Id healthz too many requests response has a 5xx status code
+func (o *GetEndpointIDHealthzTooManyRequests) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get endpoint Id healthz too many requests response has a status code equal to that given
+func (o *GetEndpointIDHealthzTooManyRequests) IsCode(code int) bool {
+ return code == 429
+}
+
+func (o *GetEndpointIDHealthzTooManyRequests) Error() string {
+ return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzTooManyRequests ", 429)
+}
+
+func (o *GetEndpointIDHealthzTooManyRequests) String() string {
+ return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzTooManyRequests ", 429)
+}
+
+func (o *GetEndpointIDHealthzTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_labels_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_labels_parameters.go
new file mode 100644
index 000000000..27dc404ab
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_labels_parameters.go
@@ -0,0 +1,168 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package endpoint
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetEndpointIDLabelsParams creates a new GetEndpointIDLabelsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetEndpointIDLabelsParams() *GetEndpointIDLabelsParams {
+ return &GetEndpointIDLabelsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetEndpointIDLabelsParamsWithTimeout creates a new GetEndpointIDLabelsParams object
+// with the ability to set a timeout on a request.
+func NewGetEndpointIDLabelsParamsWithTimeout(timeout time.Duration) *GetEndpointIDLabelsParams {
+ return &GetEndpointIDLabelsParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetEndpointIDLabelsParamsWithContext creates a new GetEndpointIDLabelsParams object
+// with the ability to set a context for a request.
+func NewGetEndpointIDLabelsParamsWithContext(ctx context.Context) *GetEndpointIDLabelsParams {
+ return &GetEndpointIDLabelsParams{
+ Context: ctx,
+ }
+}
+
+// NewGetEndpointIDLabelsParamsWithHTTPClient creates a new GetEndpointIDLabelsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetEndpointIDLabelsParamsWithHTTPClient(client *http.Client) *GetEndpointIDLabelsParams {
+ return &GetEndpointIDLabelsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetEndpointIDLabelsParams contains all the parameters to send to the API endpoint
+
+ for the get endpoint ID labels operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetEndpointIDLabelsParams struct {
+
+ /* ID.
+
+ String describing an endpoint with the format ``[prefix:]id``. If no prefix
+ is specified, a prefix of ``cilium-local:`` is assumed. Not all endpoints
+ will be addressable by all endpoint ID prefixes with the exception of the
+ local Cilium UUID which is assigned to all endpoints.
+
+ Supported endpoint id prefixes:
+ - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595
+ - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343
+ - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0
+ - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique)
+ - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique)
+ - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique)
+ - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1
+ - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444
+
+ */
+ ID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get endpoint ID labels params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetEndpointIDLabelsParams) WithDefaults() *GetEndpointIDLabelsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get endpoint ID labels params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetEndpointIDLabelsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get endpoint ID labels params
+func (o *GetEndpointIDLabelsParams) WithTimeout(timeout time.Duration) *GetEndpointIDLabelsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get endpoint ID labels params
+func (o *GetEndpointIDLabelsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get endpoint ID labels params
+func (o *GetEndpointIDLabelsParams) WithContext(ctx context.Context) *GetEndpointIDLabelsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get endpoint ID labels params
+func (o *GetEndpointIDLabelsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get endpoint ID labels params
+func (o *GetEndpointIDLabelsParams) WithHTTPClient(client *http.Client) *GetEndpointIDLabelsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get endpoint ID labels params
+func (o *GetEndpointIDLabelsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithID adds the id to the get endpoint ID labels params
+func (o *GetEndpointIDLabelsParams) WithID(id string) *GetEndpointIDLabelsParams {
+ o.SetID(id)
+ return o
+}
+
+// SetID adds the id to the get endpoint ID labels params
+func (o *GetEndpointIDLabelsParams) SetID(id string) {
+ o.ID = id
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetEndpointIDLabelsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param id
+ if err := r.SetPathParam("id", o.ID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_labels_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_labels_responses.go
new file mode 100644
index 000000000..3f4e5138d
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_labels_responses.go
@@ -0,0 +1,215 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package endpoint
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// GetEndpointIDLabelsReader is a Reader for the GetEndpointIDLabels structure.
+type GetEndpointIDLabelsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetEndpointIDLabelsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetEndpointIDLabelsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 404:
+ result := NewGetEndpointIDLabelsNotFound()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 429:
+ result := NewGetEndpointIDLabelsTooManyRequests()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetEndpointIDLabelsOK creates a GetEndpointIDLabelsOK with default headers values
+func NewGetEndpointIDLabelsOK() *GetEndpointIDLabelsOK {
+ return &GetEndpointIDLabelsOK{}
+}
+
+/*
+GetEndpointIDLabelsOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetEndpointIDLabelsOK struct {
+ Payload *models.LabelConfiguration
+}
+
+// IsSuccess returns true when this get endpoint Id labels o k response has a 2xx status code
+func (o *GetEndpointIDLabelsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get endpoint Id labels o k response has a 3xx status code
+func (o *GetEndpointIDLabelsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get endpoint Id labels o k response has a 4xx status code
+func (o *GetEndpointIDLabelsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get endpoint Id labels o k response has a 5xx status code
+func (o *GetEndpointIDLabelsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get endpoint Id labels o k response a status code equal to that given
+func (o *GetEndpointIDLabelsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetEndpointIDLabelsOK) Error() string {
+ return fmt.Sprintf("[GET /endpoint/{id}/labels][%d] getEndpointIdLabelsOK %+v", 200, o.Payload)
+}
+
+func (o *GetEndpointIDLabelsOK) String() string {
+ return fmt.Sprintf("[GET /endpoint/{id}/labels][%d] getEndpointIdLabelsOK %+v", 200, o.Payload)
+}
+
+func (o *GetEndpointIDLabelsOK) GetPayload() *models.LabelConfiguration {
+ return o.Payload
+}
+
+func (o *GetEndpointIDLabelsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.LabelConfiguration)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetEndpointIDLabelsNotFound creates a GetEndpointIDLabelsNotFound with default headers values
+func NewGetEndpointIDLabelsNotFound() *GetEndpointIDLabelsNotFound {
+ return &GetEndpointIDLabelsNotFound{}
+}
+
+/*
+GetEndpointIDLabelsNotFound describes a response with status code 404, with default header values.
+
+Endpoint not found
+*/
+type GetEndpointIDLabelsNotFound struct {
+}
+
+// IsSuccess returns true when this get endpoint Id labels not found response has a 2xx status code
+func (o *GetEndpointIDLabelsNotFound) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get endpoint Id labels not found response has a 3xx status code
+func (o *GetEndpointIDLabelsNotFound) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get endpoint Id labels not found response has a 4xx status code
+func (o *GetEndpointIDLabelsNotFound) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get endpoint Id labels not found response has a 5xx status code
+func (o *GetEndpointIDLabelsNotFound) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get endpoint Id labels not found response a status code equal to that given
+func (o *GetEndpointIDLabelsNotFound) IsCode(code int) bool {
+ return code == 404
+}
+
+func (o *GetEndpointIDLabelsNotFound) Error() string {
+ return fmt.Sprintf("[GET /endpoint/{id}/labels][%d] getEndpointIdLabelsNotFound ", 404)
+}
+
+func (o *GetEndpointIDLabelsNotFound) String() string {
+ return fmt.Sprintf("[GET /endpoint/{id}/labels][%d] getEndpointIdLabelsNotFound ", 404)
+}
+
+func (o *GetEndpointIDLabelsNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewGetEndpointIDLabelsTooManyRequests creates a GetEndpointIDLabelsTooManyRequests with default headers values
+func NewGetEndpointIDLabelsTooManyRequests() *GetEndpointIDLabelsTooManyRequests {
+ return &GetEndpointIDLabelsTooManyRequests{}
+}
+
+/*
+GetEndpointIDLabelsTooManyRequests describes a response with status code 429, with default header values.
+
+Rate-limiting too many requests in the given time frame
+*/
+type GetEndpointIDLabelsTooManyRequests struct {
+}
+
+// IsSuccess returns true when this get endpoint Id labels too many requests response has a 2xx status code
+func (o *GetEndpointIDLabelsTooManyRequests) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get endpoint Id labels too many requests response has a 3xx status code
+func (o *GetEndpointIDLabelsTooManyRequests) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get endpoint Id labels too many requests response has a 4xx status code
+func (o *GetEndpointIDLabelsTooManyRequests) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get endpoint Id labels too many requests response has a 5xx status code
+func (o *GetEndpointIDLabelsTooManyRequests) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get endpoint Id labels too many requests response a status code equal to that given
+func (o *GetEndpointIDLabelsTooManyRequests) IsCode(code int) bool {
+ return code == 429
+}
+
+func (o *GetEndpointIDLabelsTooManyRequests) Error() string {
+ return fmt.Sprintf("[GET /endpoint/{id}/labels][%d] getEndpointIdLabelsTooManyRequests ", 429)
+}
+
+func (o *GetEndpointIDLabelsTooManyRequests) String() string {
+ return fmt.Sprintf("[GET /endpoint/{id}/labels][%d] getEndpointIdLabelsTooManyRequests ", 429)
+}
+
+func (o *GetEndpointIDLabelsTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_log_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_log_parameters.go
new file mode 100644
index 000000000..f93b295ca
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_log_parameters.go
@@ -0,0 +1,168 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package endpoint
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetEndpointIDLogParams creates a new GetEndpointIDLogParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetEndpointIDLogParams() *GetEndpointIDLogParams {
+ return &GetEndpointIDLogParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetEndpointIDLogParamsWithTimeout creates a new GetEndpointIDLogParams object
+// with the ability to set a timeout on a request.
+func NewGetEndpointIDLogParamsWithTimeout(timeout time.Duration) *GetEndpointIDLogParams {
+ return &GetEndpointIDLogParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetEndpointIDLogParamsWithContext creates a new GetEndpointIDLogParams object
+// with the ability to set a context for a request.
+func NewGetEndpointIDLogParamsWithContext(ctx context.Context) *GetEndpointIDLogParams {
+ return &GetEndpointIDLogParams{
+ Context: ctx,
+ }
+}
+
+// NewGetEndpointIDLogParamsWithHTTPClient creates a new GetEndpointIDLogParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetEndpointIDLogParamsWithHTTPClient(client *http.Client) *GetEndpointIDLogParams {
+ return &GetEndpointIDLogParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetEndpointIDLogParams contains all the parameters to send to the API endpoint
+
+ for the get endpoint ID log operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetEndpointIDLogParams struct {
+
+ /* ID.
+
+ String describing an endpoint with the format ``[prefix:]id``. If no prefix
+ is specified, a prefix of ``cilium-local:`` is assumed. Not all endpoints
+ will be addressable by all endpoint ID prefixes with the exception of the
+ local Cilium UUID which is assigned to all endpoints.
+
+ Supported endpoint id prefixes:
+ - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595
+ - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343
+ - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0
+ - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique)
+ - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique)
+ - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique)
+ - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1
+ - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444
+
+ */
+ ID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get endpoint ID log params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetEndpointIDLogParams) WithDefaults() *GetEndpointIDLogParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get endpoint ID log params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetEndpointIDLogParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get endpoint ID log params
+func (o *GetEndpointIDLogParams) WithTimeout(timeout time.Duration) *GetEndpointIDLogParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get endpoint ID log params
+func (o *GetEndpointIDLogParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get endpoint ID log params
+func (o *GetEndpointIDLogParams) WithContext(ctx context.Context) *GetEndpointIDLogParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get endpoint ID log params
+func (o *GetEndpointIDLogParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get endpoint ID log params
+func (o *GetEndpointIDLogParams) WithHTTPClient(client *http.Client) *GetEndpointIDLogParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get endpoint ID log params
+func (o *GetEndpointIDLogParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithID adds the id to the get endpoint ID log params
+func (o *GetEndpointIDLogParams) WithID(id string) *GetEndpointIDLogParams {
+ o.SetID(id)
+ return o
+}
+
+// SetID adds the id to the get endpoint ID log params
+func (o *GetEndpointIDLogParams) SetID(id string) {
+ o.ID = id
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetEndpointIDLogParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param id
+ if err := r.SetPathParam("id", o.ID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_log_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_log_responses.go
new file mode 100644
index 000000000..db7227d1d
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_log_responses.go
@@ -0,0 +1,270 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package endpoint
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// GetEndpointIDLogReader is a Reader for the GetEndpointIDLog structure.
+type GetEndpointIDLogReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetEndpointIDLogReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetEndpointIDLogOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewGetEndpointIDLogInvalid()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 404:
+ result := NewGetEndpointIDLogNotFound()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 429:
+ result := NewGetEndpointIDLogTooManyRequests()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetEndpointIDLogOK creates a GetEndpointIDLogOK with default headers values
+func NewGetEndpointIDLogOK() *GetEndpointIDLogOK {
+ return &GetEndpointIDLogOK{}
+}
+
+/*
+GetEndpointIDLogOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetEndpointIDLogOK struct {
+ Payload models.EndpointStatusLog
+}
+
+// IsSuccess returns true when this get endpoint Id log o k response has a 2xx status code
+func (o *GetEndpointIDLogOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get endpoint Id log o k response has a 3xx status code
+func (o *GetEndpointIDLogOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get endpoint Id log o k response has a 4xx status code
+func (o *GetEndpointIDLogOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get endpoint Id log o k response has a 5xx status code
+func (o *GetEndpointIDLogOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get endpoint Id log o k response a status code equal to that given
+func (o *GetEndpointIDLogOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetEndpointIDLogOK) Error() string {
+ return fmt.Sprintf("[GET /endpoint/{id}/log][%d] getEndpointIdLogOK %+v", 200, o.Payload)
+}
+
+func (o *GetEndpointIDLogOK) String() string {
+ return fmt.Sprintf("[GET /endpoint/{id}/log][%d] getEndpointIdLogOK %+v", 200, o.Payload)
+}
+
+func (o *GetEndpointIDLogOK) GetPayload() models.EndpointStatusLog {
+ return o.Payload
+}
+
+func (o *GetEndpointIDLogOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetEndpointIDLogInvalid creates a GetEndpointIDLogInvalid with default headers values
+func NewGetEndpointIDLogInvalid() *GetEndpointIDLogInvalid {
+ return &GetEndpointIDLogInvalid{}
+}
+
+/*
+GetEndpointIDLogInvalid describes a response with status code 400, with default header values.
+
+Invalid identity provided
+*/
+type GetEndpointIDLogInvalid struct {
+}
+
+// IsSuccess returns true when this get endpoint Id log invalid response has a 2xx status code
+func (o *GetEndpointIDLogInvalid) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get endpoint Id log invalid response has a 3xx status code
+func (o *GetEndpointIDLogInvalid) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get endpoint Id log invalid response has a 4xx status code
+func (o *GetEndpointIDLogInvalid) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get endpoint Id log invalid response has a 5xx status code
+func (o *GetEndpointIDLogInvalid) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get endpoint Id log invalid response a status code equal to that given
+func (o *GetEndpointIDLogInvalid) IsCode(code int) bool {
+ return code == 400
+}
+
+func (o *GetEndpointIDLogInvalid) Error() string {
+ return fmt.Sprintf("[GET /endpoint/{id}/log][%d] getEndpointIdLogInvalid ", 400)
+}
+
+func (o *GetEndpointIDLogInvalid) String() string {
+ return fmt.Sprintf("[GET /endpoint/{id}/log][%d] getEndpointIdLogInvalid ", 400)
+}
+
+func (o *GetEndpointIDLogInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewGetEndpointIDLogNotFound creates a GetEndpointIDLogNotFound with default headers values
+func NewGetEndpointIDLogNotFound() *GetEndpointIDLogNotFound {
+ return &GetEndpointIDLogNotFound{}
+}
+
+/*
+GetEndpointIDLogNotFound describes a response with status code 404, with default header values.
+
+Endpoint not found
+*/
+type GetEndpointIDLogNotFound struct {
+}
+
+// IsSuccess returns true when this get endpoint Id log not found response has a 2xx status code
+func (o *GetEndpointIDLogNotFound) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get endpoint Id log not found response has a 3xx status code
+func (o *GetEndpointIDLogNotFound) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get endpoint Id log not found response has a 4xx status code
+func (o *GetEndpointIDLogNotFound) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get endpoint Id log not found response has a 5xx status code
+func (o *GetEndpointIDLogNotFound) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get endpoint Id log not found response a status code equal to that given
+func (o *GetEndpointIDLogNotFound) IsCode(code int) bool {
+ return code == 404
+}
+
+func (o *GetEndpointIDLogNotFound) Error() string {
+ return fmt.Sprintf("[GET /endpoint/{id}/log][%d] getEndpointIdLogNotFound ", 404)
+}
+
+func (o *GetEndpointIDLogNotFound) String() string {
+ return fmt.Sprintf("[GET /endpoint/{id}/log][%d] getEndpointIdLogNotFound ", 404)
+}
+
+func (o *GetEndpointIDLogNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewGetEndpointIDLogTooManyRequests creates a GetEndpointIDLogTooManyRequests with default headers values
+func NewGetEndpointIDLogTooManyRequests() *GetEndpointIDLogTooManyRequests {
+ return &GetEndpointIDLogTooManyRequests{}
+}
+
+/*
+GetEndpointIDLogTooManyRequests describes a response with status code 429, with default header values.
+
+Rate-limiting too many requests in the given time frame
+*/
+type GetEndpointIDLogTooManyRequests struct {
+}
+
+// IsSuccess returns true when this get endpoint Id log too many requests response has a 2xx status code
+func (o *GetEndpointIDLogTooManyRequests) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get endpoint Id log too many requests response has a 3xx status code
+func (o *GetEndpointIDLogTooManyRequests) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get endpoint Id log too many requests response has a 4xx status code
+func (o *GetEndpointIDLogTooManyRequests) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get endpoint Id log too many requests response has a 5xx status code
+func (o *GetEndpointIDLogTooManyRequests) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get endpoint Id log too many requests response a status code equal to that given
+func (o *GetEndpointIDLogTooManyRequests) IsCode(code int) bool {
+ return code == 429
+}
+
+func (o *GetEndpointIDLogTooManyRequests) Error() string {
+ return fmt.Sprintf("[GET /endpoint/{id}/log][%d] getEndpointIdLogTooManyRequests ", 429)
+}
+
+func (o *GetEndpointIDLogTooManyRequests) String() string {
+ return fmt.Sprintf("[GET /endpoint/{id}/log][%d] getEndpointIdLogTooManyRequests ", 429)
+}
+
+func (o *GetEndpointIDLogTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_parameters.go
new file mode 100644
index 000000000..4bad76a97
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_parameters.go
@@ -0,0 +1,168 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package endpoint
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetEndpointIDParams creates a new GetEndpointIDParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetEndpointIDParams() *GetEndpointIDParams {
+ return &GetEndpointIDParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetEndpointIDParamsWithTimeout creates a new GetEndpointIDParams object
+// with the ability to set a timeout on a request.
+func NewGetEndpointIDParamsWithTimeout(timeout time.Duration) *GetEndpointIDParams {
+ return &GetEndpointIDParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetEndpointIDParamsWithContext creates a new GetEndpointIDParams object
+// with the ability to set a context for a request.
+func NewGetEndpointIDParamsWithContext(ctx context.Context) *GetEndpointIDParams {
+ return &GetEndpointIDParams{
+ Context: ctx,
+ }
+}
+
+// NewGetEndpointIDParamsWithHTTPClient creates a new GetEndpointIDParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetEndpointIDParamsWithHTTPClient(client *http.Client) *GetEndpointIDParams {
+ return &GetEndpointIDParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetEndpointIDParams contains all the parameters to send to the API endpoint
+
+ for the get endpoint ID operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetEndpointIDParams struct {
+
+ /* ID.
+
+ String describing an endpoint with the format ``[prefix:]id``. If no prefix
+ is specified, a prefix of ``cilium-local:`` is assumed. Not all endpoints
+ will be addressable by all endpoint ID prefixes with the exception of the
+ local Cilium UUID which is assigned to all endpoints.
+
+ Supported endpoint id prefixes:
+ - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595
+ - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343
+ - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0
+ - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique)
+ - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique)
+ - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique)
+ - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1
+ - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444
+
+ */
+ ID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get endpoint ID params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetEndpointIDParams) WithDefaults() *GetEndpointIDParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get endpoint ID params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetEndpointIDParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get endpoint ID params
+func (o *GetEndpointIDParams) WithTimeout(timeout time.Duration) *GetEndpointIDParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get endpoint ID params
+func (o *GetEndpointIDParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get endpoint ID params
+func (o *GetEndpointIDParams) WithContext(ctx context.Context) *GetEndpointIDParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get endpoint ID params
+func (o *GetEndpointIDParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get endpoint ID params
+func (o *GetEndpointIDParams) WithHTTPClient(client *http.Client) *GetEndpointIDParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get endpoint ID params
+func (o *GetEndpointIDParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithID adds the id to the get endpoint ID params
+func (o *GetEndpointIDParams) WithID(id string) *GetEndpointIDParams {
+ o.SetID(id)
+ return o
+}
+
+// SetID adds the id to the get endpoint ID params
+func (o *GetEndpointIDParams) SetID(id string) {
+ o.ID = id
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetEndpointIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param id
+ if err := r.SetPathParam("id", o.ID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_responses.go
new file mode 100644
index 000000000..4d07e3aa4
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_responses.go
@@ -0,0 +1,282 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package endpoint
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// GetEndpointIDReader is a Reader for the GetEndpointID structure.
+type GetEndpointIDReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetEndpointIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetEndpointIDOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewGetEndpointIDInvalid()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 404:
+ result := NewGetEndpointIDNotFound()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 429:
+ result := NewGetEndpointIDTooManyRequests()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetEndpointIDOK creates a GetEndpointIDOK with default headers values
+func NewGetEndpointIDOK() *GetEndpointIDOK {
+ return &GetEndpointIDOK{}
+}
+
+/*
+GetEndpointIDOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetEndpointIDOK struct {
+ Payload *models.Endpoint
+}
+
+// IsSuccess returns true when this get endpoint Id o k response has a 2xx status code
+func (o *GetEndpointIDOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get endpoint Id o k response has a 3xx status code
+func (o *GetEndpointIDOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get endpoint Id o k response has a 4xx status code
+func (o *GetEndpointIDOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get endpoint Id o k response has a 5xx status code
+func (o *GetEndpointIDOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get endpoint Id o k response a status code equal to that given
+func (o *GetEndpointIDOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetEndpointIDOK) Error() string {
+ return fmt.Sprintf("[GET /endpoint/{id}][%d] getEndpointIdOK %+v", 200, o.Payload)
+}
+
+func (o *GetEndpointIDOK) String() string {
+ return fmt.Sprintf("[GET /endpoint/{id}][%d] getEndpointIdOK %+v", 200, o.Payload)
+}
+
+func (o *GetEndpointIDOK) GetPayload() *models.Endpoint {
+ return o.Payload
+}
+
+func (o *GetEndpointIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.Endpoint)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetEndpointIDInvalid creates a GetEndpointIDInvalid with default headers values
+func NewGetEndpointIDInvalid() *GetEndpointIDInvalid {
+ return &GetEndpointIDInvalid{}
+}
+
+/*
+GetEndpointIDInvalid describes a response with status code 400, with default header values.
+
+Invalid endpoint ID format for specified type
+*/
+type GetEndpointIDInvalid struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this get endpoint Id invalid response has a 2xx status code
+func (o *GetEndpointIDInvalid) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get endpoint Id invalid response has a 3xx status code
+func (o *GetEndpointIDInvalid) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get endpoint Id invalid response has a 4xx status code
+func (o *GetEndpointIDInvalid) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get endpoint Id invalid response has a 5xx status code
+func (o *GetEndpointIDInvalid) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get endpoint Id invalid response a status code equal to that given
+func (o *GetEndpointIDInvalid) IsCode(code int) bool {
+ return code == 400
+}
+
+func (o *GetEndpointIDInvalid) Error() string {
+ return fmt.Sprintf("[GET /endpoint/{id}][%d] getEndpointIdInvalid %+v", 400, o.Payload)
+}
+
+func (o *GetEndpointIDInvalid) String() string {
+ return fmt.Sprintf("[GET /endpoint/{id}][%d] getEndpointIdInvalid %+v", 400, o.Payload)
+}
+
+func (o *GetEndpointIDInvalid) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *GetEndpointIDInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetEndpointIDNotFound creates a GetEndpointIDNotFound with default headers values
+func NewGetEndpointIDNotFound() *GetEndpointIDNotFound {
+ return &GetEndpointIDNotFound{}
+}
+
+/*
+GetEndpointIDNotFound describes a response with status code 404, with default header values.
+
+Endpoint not found
+*/
+type GetEndpointIDNotFound struct {
+}
+
+// IsSuccess returns true when this get endpoint Id not found response has a 2xx status code
+func (o *GetEndpointIDNotFound) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get endpoint Id not found response has a 3xx status code
+func (o *GetEndpointIDNotFound) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get endpoint Id not found response has a 4xx status code
+func (o *GetEndpointIDNotFound) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get endpoint Id not found response has a 5xx status code
+func (o *GetEndpointIDNotFound) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get endpoint Id not found response a status code equal to that given
+func (o *GetEndpointIDNotFound) IsCode(code int) bool {
+ return code == 404
+}
+
+func (o *GetEndpointIDNotFound) Error() string {
+ return fmt.Sprintf("[GET /endpoint/{id}][%d] getEndpointIdNotFound ", 404)
+}
+
+func (o *GetEndpointIDNotFound) String() string {
+ return fmt.Sprintf("[GET /endpoint/{id}][%d] getEndpointIdNotFound ", 404)
+}
+
+func (o *GetEndpointIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewGetEndpointIDTooManyRequests creates a GetEndpointIDTooManyRequests with default headers values
+func NewGetEndpointIDTooManyRequests() *GetEndpointIDTooManyRequests {
+ return &GetEndpointIDTooManyRequests{}
+}
+
+/*
+GetEndpointIDTooManyRequests describes a response with status code 429, with default header values.
+
+Rate-limiting too many requests in the given time frame
+*/
+type GetEndpointIDTooManyRequests struct {
+}
+
+// IsSuccess returns true when this get endpoint Id too many requests response has a 2xx status code
+func (o *GetEndpointIDTooManyRequests) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get endpoint Id too many requests response has a 3xx status code
+func (o *GetEndpointIDTooManyRequests) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get endpoint Id too many requests response has a 4xx status code
+func (o *GetEndpointIDTooManyRequests) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get endpoint Id too many requests response has a 5xx status code
+func (o *GetEndpointIDTooManyRequests) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get endpoint Id too many requests response a status code equal to that given
+func (o *GetEndpointIDTooManyRequests) IsCode(code int) bool {
+ return code == 429
+}
+
+func (o *GetEndpointIDTooManyRequests) Error() string {
+ return fmt.Sprintf("[GET /endpoint/{id}][%d] getEndpointIdTooManyRequests ", 429)
+}
+
+func (o *GetEndpointIDTooManyRequests) String() string {
+ return fmt.Sprintf("[GET /endpoint/{id}][%d] getEndpointIdTooManyRequests ", 429)
+}
+
+func (o *GetEndpointIDTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_parameters.go
new file mode 100644
index 000000000..fa20da9d3
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_parameters.go
@@ -0,0 +1,157 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package endpoint
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// NewGetEndpointParams creates a new GetEndpointParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetEndpointParams() *GetEndpointParams {
+ return &GetEndpointParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetEndpointParamsWithTimeout creates a new GetEndpointParams object
+// with the ability to set a timeout on a request.
+func NewGetEndpointParamsWithTimeout(timeout time.Duration) *GetEndpointParams {
+ return &GetEndpointParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetEndpointParamsWithContext creates a new GetEndpointParams object
+// with the ability to set a context for a request.
+func NewGetEndpointParamsWithContext(ctx context.Context) *GetEndpointParams {
+ return &GetEndpointParams{
+ Context: ctx,
+ }
+}
+
+// NewGetEndpointParamsWithHTTPClient creates a new GetEndpointParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetEndpointParamsWithHTTPClient(client *http.Client) *GetEndpointParams {
+ return &GetEndpointParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetEndpointParams contains all the parameters to send to the API endpoint
+
+ for the get endpoint operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetEndpointParams struct {
+
+ /* Labels.
+
+ List of labels
+
+ */
+ Labels models.Labels
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get endpoint params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetEndpointParams) WithDefaults() *GetEndpointParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get endpoint params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetEndpointParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get endpoint params
+func (o *GetEndpointParams) WithTimeout(timeout time.Duration) *GetEndpointParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get endpoint params
+func (o *GetEndpointParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get endpoint params
+func (o *GetEndpointParams) WithContext(ctx context.Context) *GetEndpointParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get endpoint params
+func (o *GetEndpointParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get endpoint params
+func (o *GetEndpointParams) WithHTTPClient(client *http.Client) *GetEndpointParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get endpoint params
+func (o *GetEndpointParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithLabels adds the labels to the get endpoint params
+func (o *GetEndpointParams) WithLabels(labels models.Labels) *GetEndpointParams {
+ o.SetLabels(labels)
+ return o
+}
+
+// SetLabels adds the labels to the get endpoint params
+func (o *GetEndpointParams) SetLabels(labels models.Labels) {
+ o.Labels = labels
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetEndpointParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if o.Labels != nil {
+ if err := r.SetBodyParam(o.Labels); err != nil {
+ return err
+ }
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_responses.go
new file mode 100644
index 000000000..5193537ea
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_responses.go
@@ -0,0 +1,213 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package endpoint
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// GetEndpointReader is a Reader for the GetEndpoint structure.
+type GetEndpointReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetEndpointReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetEndpointOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 404:
+ result := NewGetEndpointNotFound()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 429:
+ result := NewGetEndpointTooManyRequests()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetEndpointOK creates a GetEndpointOK with default headers values
+func NewGetEndpointOK() *GetEndpointOK {
+ return &GetEndpointOK{}
+}
+
+/*
+GetEndpointOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetEndpointOK struct {
+ Payload []*models.Endpoint
+}
+
+// IsSuccess returns true when this get endpoint o k response has a 2xx status code
+func (o *GetEndpointOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get endpoint o k response has a 3xx status code
+func (o *GetEndpointOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get endpoint o k response has a 4xx status code
+func (o *GetEndpointOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get endpoint o k response has a 5xx status code
+func (o *GetEndpointOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get endpoint o k response a status code equal to that given
+func (o *GetEndpointOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetEndpointOK) Error() string {
+ return fmt.Sprintf("[GET /endpoint][%d] getEndpointOK %+v", 200, o.Payload)
+}
+
+func (o *GetEndpointOK) String() string {
+ return fmt.Sprintf("[GET /endpoint][%d] getEndpointOK %+v", 200, o.Payload)
+}
+
+func (o *GetEndpointOK) GetPayload() []*models.Endpoint {
+ return o.Payload
+}
+
+func (o *GetEndpointOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetEndpointNotFound creates a GetEndpointNotFound with default headers values
+func NewGetEndpointNotFound() *GetEndpointNotFound {
+ return &GetEndpointNotFound{}
+}
+
+/*
+GetEndpointNotFound describes a response with status code 404, with default header values.
+
+Endpoints with provided parameters not found
+*/
+type GetEndpointNotFound struct {
+}
+
+// IsSuccess returns true when this get endpoint not found response has a 2xx status code
+func (o *GetEndpointNotFound) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get endpoint not found response has a 3xx status code
+func (o *GetEndpointNotFound) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get endpoint not found response has a 4xx status code
+func (o *GetEndpointNotFound) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get endpoint not found response has a 5xx status code
+func (o *GetEndpointNotFound) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get endpoint not found response a status code equal to that given
+func (o *GetEndpointNotFound) IsCode(code int) bool {
+ return code == 404
+}
+
+func (o *GetEndpointNotFound) Error() string {
+ return fmt.Sprintf("[GET /endpoint][%d] getEndpointNotFound ", 404)
+}
+
+func (o *GetEndpointNotFound) String() string {
+ return fmt.Sprintf("[GET /endpoint][%d] getEndpointNotFound ", 404)
+}
+
+func (o *GetEndpointNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewGetEndpointTooManyRequests creates a GetEndpointTooManyRequests with default headers values
+func NewGetEndpointTooManyRequests() *GetEndpointTooManyRequests {
+ return &GetEndpointTooManyRequests{}
+}
+
+/*
+GetEndpointTooManyRequests describes a response with status code 429, with default header values.
+
+Rate-limiting too many requests in the given time frame
+*/
+type GetEndpointTooManyRequests struct {
+}
+
+// IsSuccess returns true when this get endpoint too many requests response has a 2xx status code
+func (o *GetEndpointTooManyRequests) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get endpoint too many requests response has a 3xx status code
+func (o *GetEndpointTooManyRequests) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get endpoint too many requests response has a 4xx status code
+func (o *GetEndpointTooManyRequests) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get endpoint too many requests response has a 5xx status code
+func (o *GetEndpointTooManyRequests) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get endpoint too many requests response a status code equal to that given
+func (o *GetEndpointTooManyRequests) IsCode(code int) bool {
+ return code == 429
+}
+
+func (o *GetEndpointTooManyRequests) Error() string {
+ return fmt.Sprintf("[GET /endpoint][%d] getEndpointTooManyRequests ", 429)
+}
+
+func (o *GetEndpointTooManyRequests) String() string {
+ return fmt.Sprintf("[GET /endpoint][%d] getEndpointTooManyRequests ", 429)
+}
+
+func (o *GetEndpointTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_config_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_config_parameters.go
new file mode 100644
index 000000000..9ecd8ef8a
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_config_parameters.go
@@ -0,0 +1,189 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package endpoint
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// NewPatchEndpointIDConfigParams creates a new PatchEndpointIDConfigParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewPatchEndpointIDConfigParams() *PatchEndpointIDConfigParams {
+ return &PatchEndpointIDConfigParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewPatchEndpointIDConfigParamsWithTimeout creates a new PatchEndpointIDConfigParams object
+// with the ability to set a timeout on a request.
+func NewPatchEndpointIDConfigParamsWithTimeout(timeout time.Duration) *PatchEndpointIDConfigParams {
+ return &PatchEndpointIDConfigParams{
+ timeout: timeout,
+ }
+}
+
+// NewPatchEndpointIDConfigParamsWithContext creates a new PatchEndpointIDConfigParams object
+// with the ability to set a context for a request.
+func NewPatchEndpointIDConfigParamsWithContext(ctx context.Context) *PatchEndpointIDConfigParams {
+ return &PatchEndpointIDConfigParams{
+ Context: ctx,
+ }
+}
+
+// NewPatchEndpointIDConfigParamsWithHTTPClient creates a new PatchEndpointIDConfigParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewPatchEndpointIDConfigParamsWithHTTPClient(client *http.Client) *PatchEndpointIDConfigParams {
+ return &PatchEndpointIDConfigParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+PatchEndpointIDConfigParams contains all the parameters to send to the API endpoint
+
+ for the patch endpoint ID config operation.
+
+ Typically these are written to a http.Request.
+*/
+type PatchEndpointIDConfigParams struct {
+
+ // EndpointConfiguration.
+ EndpointConfiguration *models.EndpointConfigurationSpec
+
+ /* ID.
+
+ String describing an endpoint with the format ``[prefix:]id``. If no prefix
+ is specified, a prefix of ``cilium-local:`` is assumed. Not all endpoints
+ will be addressable by all endpoint ID prefixes with the exception of the
+ local Cilium UUID which is assigned to all endpoints.
+
+ Supported endpoint id prefixes:
+ - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595
+ - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343
+ - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0
+ - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique)
+ - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique)
+ - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique)
+ - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1
+ - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444
+
+ */
+ ID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the patch endpoint ID config params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *PatchEndpointIDConfigParams) WithDefaults() *PatchEndpointIDConfigParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the patch endpoint ID config params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *PatchEndpointIDConfigParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the patch endpoint ID config params
+func (o *PatchEndpointIDConfigParams) WithTimeout(timeout time.Duration) *PatchEndpointIDConfigParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the patch endpoint ID config params
+func (o *PatchEndpointIDConfigParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the patch endpoint ID config params
+func (o *PatchEndpointIDConfigParams) WithContext(ctx context.Context) *PatchEndpointIDConfigParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the patch endpoint ID config params
+func (o *PatchEndpointIDConfigParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the patch endpoint ID config params
+func (o *PatchEndpointIDConfigParams) WithHTTPClient(client *http.Client) *PatchEndpointIDConfigParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the patch endpoint ID config params
+func (o *PatchEndpointIDConfigParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithEndpointConfiguration adds the endpointConfiguration to the patch endpoint ID config params
+func (o *PatchEndpointIDConfigParams) WithEndpointConfiguration(endpointConfiguration *models.EndpointConfigurationSpec) *PatchEndpointIDConfigParams {
+ o.SetEndpointConfiguration(endpointConfiguration)
+ return o
+}
+
+// SetEndpointConfiguration adds the endpointConfiguration to the patch endpoint ID config params
+func (o *PatchEndpointIDConfigParams) SetEndpointConfiguration(endpointConfiguration *models.EndpointConfigurationSpec) {
+ o.EndpointConfiguration = endpointConfiguration
+}
+
+// WithID adds the id to the patch endpoint ID config params
+func (o *PatchEndpointIDConfigParams) WithID(id string) *PatchEndpointIDConfigParams {
+ o.SetID(id)
+ return o
+}
+
+// SetID adds the id to the patch endpoint ID config params
+func (o *PatchEndpointIDConfigParams) SetID(id string) {
+ o.ID = id
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *PatchEndpointIDConfigParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if o.EndpointConfiguration != nil {
+ if err := r.SetBodyParam(o.EndpointConfiguration); err != nil {
+ return err
+ }
+ }
+
+ // path param id
+ if err := r.SetPathParam("id", o.ID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_config_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_config_responses.go
new file mode 100644
index 000000000..fc2c4883a
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_config_responses.go
@@ -0,0 +1,384 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package endpoint
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// PatchEndpointIDConfigReader is a Reader for the PatchEndpointIDConfig structure.
+type PatchEndpointIDConfigReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *PatchEndpointIDConfigReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewPatchEndpointIDConfigOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewPatchEndpointIDConfigInvalid()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 403:
+ result := NewPatchEndpointIDConfigForbidden()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 404:
+ result := NewPatchEndpointIDConfigNotFound()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 429:
+ result := NewPatchEndpointIDConfigTooManyRequests()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 500:
+ result := NewPatchEndpointIDConfigFailed()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewPatchEndpointIDConfigOK creates a PatchEndpointIDConfigOK with default headers values
+func NewPatchEndpointIDConfigOK() *PatchEndpointIDConfigOK {
+ return &PatchEndpointIDConfigOK{}
+}
+
+/*
+PatchEndpointIDConfigOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type PatchEndpointIDConfigOK struct {
+}
+
+// IsSuccess returns true when this patch endpoint Id config o k response has a 2xx status code
+func (o *PatchEndpointIDConfigOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this patch endpoint Id config o k response has a 3xx status code
+func (o *PatchEndpointIDConfigOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this patch endpoint Id config o k response has a 4xx status code
+func (o *PatchEndpointIDConfigOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this patch endpoint Id config o k response has a 5xx status code
+func (o *PatchEndpointIDConfigOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this patch endpoint Id config o k response a status code equal to that given
+func (o *PatchEndpointIDConfigOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *PatchEndpointIDConfigOK) Error() string {
+ return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigOK ", 200)
+}
+
+func (o *PatchEndpointIDConfigOK) String() string {
+ return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigOK ", 200)
+}
+
+func (o *PatchEndpointIDConfigOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewPatchEndpointIDConfigInvalid creates a PatchEndpointIDConfigInvalid with default headers values
+func NewPatchEndpointIDConfigInvalid() *PatchEndpointIDConfigInvalid {
+ return &PatchEndpointIDConfigInvalid{}
+}
+
+/*
+PatchEndpointIDConfigInvalid describes a response with status code 400, with default header values.
+
+Invalid configuration request
+*/
+type PatchEndpointIDConfigInvalid struct {
+}
+
+// IsSuccess returns true when this patch endpoint Id config invalid response has a 2xx status code
+func (o *PatchEndpointIDConfigInvalid) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this patch endpoint Id config invalid response has a 3xx status code
+func (o *PatchEndpointIDConfigInvalid) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this patch endpoint Id config invalid response has a 4xx status code
+func (o *PatchEndpointIDConfigInvalid) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this patch endpoint Id config invalid response has a 5xx status code
+func (o *PatchEndpointIDConfigInvalid) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this patch endpoint Id config invalid response a status code equal to that given
+func (o *PatchEndpointIDConfigInvalid) IsCode(code int) bool {
+ return code == 400
+}
+
+func (o *PatchEndpointIDConfigInvalid) Error() string {
+ return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigInvalid ", 400)
+}
+
+func (o *PatchEndpointIDConfigInvalid) String() string {
+ return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigInvalid ", 400)
+}
+
+func (o *PatchEndpointIDConfigInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewPatchEndpointIDConfigForbidden creates a PatchEndpointIDConfigForbidden with default headers values
+func NewPatchEndpointIDConfigForbidden() *PatchEndpointIDConfigForbidden {
+ return &PatchEndpointIDConfigForbidden{}
+}
+
+/*
+PatchEndpointIDConfigForbidden describes a response with status code 403, with default header values.
+
+Forbidden
+*/
+type PatchEndpointIDConfigForbidden struct {
+}
+
+// IsSuccess returns true when this patch endpoint Id config forbidden response has a 2xx status code
+func (o *PatchEndpointIDConfigForbidden) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this patch endpoint Id config forbidden response has a 3xx status code
+func (o *PatchEndpointIDConfigForbidden) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this patch endpoint Id config forbidden response has a 4xx status code
+func (o *PatchEndpointIDConfigForbidden) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this patch endpoint Id config forbidden response has a 5xx status code
+func (o *PatchEndpointIDConfigForbidden) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this patch endpoint Id config forbidden response a status code equal to that given
+func (o *PatchEndpointIDConfigForbidden) IsCode(code int) bool {
+ return code == 403
+}
+
+func (o *PatchEndpointIDConfigForbidden) Error() string {
+ return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigForbidden ", 403)
+}
+
+func (o *PatchEndpointIDConfigForbidden) String() string {
+ return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigForbidden ", 403)
+}
+
+func (o *PatchEndpointIDConfigForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewPatchEndpointIDConfigNotFound creates a PatchEndpointIDConfigNotFound with default headers values
+func NewPatchEndpointIDConfigNotFound() *PatchEndpointIDConfigNotFound {
+ return &PatchEndpointIDConfigNotFound{}
+}
+
+/*
+PatchEndpointIDConfigNotFound describes a response with status code 404, with default header values.
+
+Endpoint not found
+*/
+type PatchEndpointIDConfigNotFound struct {
+}
+
+// IsSuccess returns true when this patch endpoint Id config not found response has a 2xx status code
+func (o *PatchEndpointIDConfigNotFound) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this patch endpoint Id config not found response has a 3xx status code
+func (o *PatchEndpointIDConfigNotFound) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this patch endpoint Id config not found response has a 4xx status code
+func (o *PatchEndpointIDConfigNotFound) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this patch endpoint Id config not found response has a 5xx status code
+func (o *PatchEndpointIDConfigNotFound) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this patch endpoint Id config not found response a status code equal to that given
+func (o *PatchEndpointIDConfigNotFound) IsCode(code int) bool {
+ return code == 404
+}
+
+func (o *PatchEndpointIDConfigNotFound) Error() string {
+ return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigNotFound ", 404)
+}
+
+func (o *PatchEndpointIDConfigNotFound) String() string {
+ return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigNotFound ", 404)
+}
+
+func (o *PatchEndpointIDConfigNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewPatchEndpointIDConfigTooManyRequests creates a PatchEndpointIDConfigTooManyRequests with default headers values
+func NewPatchEndpointIDConfigTooManyRequests() *PatchEndpointIDConfigTooManyRequests {
+ return &PatchEndpointIDConfigTooManyRequests{}
+}
+
+/*
+PatchEndpointIDConfigTooManyRequests describes a response with status code 429, with default header values.
+
+Rate-limiting too many requests in the given time frame
+*/
+type PatchEndpointIDConfigTooManyRequests struct {
+}
+
+// IsSuccess returns true when this patch endpoint Id config too many requests response has a 2xx status code
+func (o *PatchEndpointIDConfigTooManyRequests) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this patch endpoint Id config too many requests response has a 3xx status code
+func (o *PatchEndpointIDConfigTooManyRequests) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this patch endpoint Id config too many requests response has a 4xx status code
+func (o *PatchEndpointIDConfigTooManyRequests) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this patch endpoint Id config too many requests response has a 5xx status code
+func (o *PatchEndpointIDConfigTooManyRequests) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this patch endpoint Id config too many requests response a status code equal to that given
+func (o *PatchEndpointIDConfigTooManyRequests) IsCode(code int) bool {
+ return code == 429
+}
+
+func (o *PatchEndpointIDConfigTooManyRequests) Error() string {
+ return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigTooManyRequests ", 429)
+}
+
+func (o *PatchEndpointIDConfigTooManyRequests) String() string {
+ return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigTooManyRequests ", 429)
+}
+
+func (o *PatchEndpointIDConfigTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewPatchEndpointIDConfigFailed creates a PatchEndpointIDConfigFailed with default headers values
+func NewPatchEndpointIDConfigFailed() *PatchEndpointIDConfigFailed {
+ return &PatchEndpointIDConfigFailed{}
+}
+
+/*
+PatchEndpointIDConfigFailed describes a response with status code 500, with default header values.
+
+Update failed. Details in message.
+*/
+type PatchEndpointIDConfigFailed struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this patch endpoint Id config failed response has a 2xx status code
+func (o *PatchEndpointIDConfigFailed) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this patch endpoint Id config failed response has a 3xx status code
+func (o *PatchEndpointIDConfigFailed) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this patch endpoint Id config failed response has a 4xx status code
+func (o *PatchEndpointIDConfigFailed) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this patch endpoint Id config failed response has a 5xx status code
+func (o *PatchEndpointIDConfigFailed) IsServerError() bool {
+ return true
+}
+
+// IsCode returns true when this patch endpoint Id config failed response a status code equal to that given
+func (o *PatchEndpointIDConfigFailed) IsCode(code int) bool {
+ return code == 500
+}
+
+func (o *PatchEndpointIDConfigFailed) Error() string {
+ return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigFailed %+v", 500, o.Payload)
+}
+
+func (o *PatchEndpointIDConfigFailed) String() string {
+ return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigFailed %+v", 500, o.Payload)
+}
+
+func (o *PatchEndpointIDConfigFailed) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *PatchEndpointIDConfigFailed) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_labels_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_labels_parameters.go
new file mode 100644
index 000000000..8091565f5
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_labels_parameters.go
@@ -0,0 +1,189 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package endpoint
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// NewPatchEndpointIDLabelsParams creates a new PatchEndpointIDLabelsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewPatchEndpointIDLabelsParams() *PatchEndpointIDLabelsParams {
+ return &PatchEndpointIDLabelsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewPatchEndpointIDLabelsParamsWithTimeout creates a new PatchEndpointIDLabelsParams object
+// with the ability to set a timeout on a request.
+func NewPatchEndpointIDLabelsParamsWithTimeout(timeout time.Duration) *PatchEndpointIDLabelsParams {
+ return &PatchEndpointIDLabelsParams{
+ timeout: timeout,
+ }
+}
+
+// NewPatchEndpointIDLabelsParamsWithContext creates a new PatchEndpointIDLabelsParams object
+// with the ability to set a context for a request.
+func NewPatchEndpointIDLabelsParamsWithContext(ctx context.Context) *PatchEndpointIDLabelsParams {
+ return &PatchEndpointIDLabelsParams{
+ Context: ctx,
+ }
+}
+
+// NewPatchEndpointIDLabelsParamsWithHTTPClient creates a new PatchEndpointIDLabelsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewPatchEndpointIDLabelsParamsWithHTTPClient(client *http.Client) *PatchEndpointIDLabelsParams {
+ return &PatchEndpointIDLabelsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+PatchEndpointIDLabelsParams contains all the parameters to send to the API endpoint
+
+ for the patch endpoint ID labels operation.
+
+ Typically these are written to a http.Request.
+*/
+type PatchEndpointIDLabelsParams struct {
+
+ // Configuration.
+ Configuration *models.LabelConfigurationSpec
+
+ /* ID.
+
+ String describing an endpoint with the format ``[prefix:]id``. If no prefix
+ is specified, a prefix of ``cilium-local:`` is assumed. Not all endpoints
+ will be addressable by all endpoint ID prefixes with the exception of the
+ local Cilium UUID which is assigned to all endpoints.
+
+ Supported endpoint id prefixes:
+ - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595
+ - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343
+ - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0
+ - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique)
+ - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique)
+ - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique)
+ - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1
+ - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444
+
+ */
+ ID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the patch endpoint ID labels params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *PatchEndpointIDLabelsParams) WithDefaults() *PatchEndpointIDLabelsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the patch endpoint ID labels params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *PatchEndpointIDLabelsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the patch endpoint ID labels params
+func (o *PatchEndpointIDLabelsParams) WithTimeout(timeout time.Duration) *PatchEndpointIDLabelsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the patch endpoint ID labels params
+func (o *PatchEndpointIDLabelsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the patch endpoint ID labels params
+func (o *PatchEndpointIDLabelsParams) WithContext(ctx context.Context) *PatchEndpointIDLabelsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the patch endpoint ID labels params
+func (o *PatchEndpointIDLabelsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the patch endpoint ID labels params
+func (o *PatchEndpointIDLabelsParams) WithHTTPClient(client *http.Client) *PatchEndpointIDLabelsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the patch endpoint ID labels params
+func (o *PatchEndpointIDLabelsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithConfiguration adds the configuration to the patch endpoint ID labels params
+func (o *PatchEndpointIDLabelsParams) WithConfiguration(configuration *models.LabelConfigurationSpec) *PatchEndpointIDLabelsParams {
+ o.SetConfiguration(configuration)
+ return o
+}
+
+// SetConfiguration adds the configuration to the patch endpoint ID labels params
+func (o *PatchEndpointIDLabelsParams) SetConfiguration(configuration *models.LabelConfigurationSpec) {
+ o.Configuration = configuration
+}
+
+// WithID adds the id to the patch endpoint ID labels params
+func (o *PatchEndpointIDLabelsParams) WithID(id string) *PatchEndpointIDLabelsParams {
+ o.SetID(id)
+ return o
+}
+
+// SetID adds the id to the patch endpoint ID labels params
+func (o *PatchEndpointIDLabelsParams) SetID(id string) {
+ o.ID = id
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *PatchEndpointIDLabelsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if o.Configuration != nil {
+ if err := r.SetBodyParam(o.Configuration); err != nil {
+ return err
+ }
+ }
+
+ // path param id
+ if err := r.SetPathParam("id", o.ID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_labels_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_labels_responses.go
new file mode 100644
index 000000000..d16eec5c3
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_labels_responses.go
@@ -0,0 +1,327 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package endpoint
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// PatchEndpointIDLabelsReader is a Reader for the PatchEndpointIDLabels structure.
+type PatchEndpointIDLabelsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *PatchEndpointIDLabelsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewPatchEndpointIDLabelsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 403:
+ result := NewPatchEndpointIDLabelsForbidden()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 404:
+ result := NewPatchEndpointIDLabelsNotFound()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 429:
+ result := NewPatchEndpointIDLabelsTooManyRequests()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 500:
+ result := NewPatchEndpointIDLabelsUpdateFailed()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewPatchEndpointIDLabelsOK creates a PatchEndpointIDLabelsOK with default headers values
+func NewPatchEndpointIDLabelsOK() *PatchEndpointIDLabelsOK {
+ return &PatchEndpointIDLabelsOK{}
+}
+
+/*
+PatchEndpointIDLabelsOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type PatchEndpointIDLabelsOK struct {
+}
+
+// IsSuccess returns true when this patch endpoint Id labels o k response has a 2xx status code
+func (o *PatchEndpointIDLabelsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this patch endpoint Id labels o k response has a 3xx status code
+func (o *PatchEndpointIDLabelsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this patch endpoint Id labels o k response has a 4xx status code
+func (o *PatchEndpointIDLabelsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this patch endpoint Id labels o k response has a 5xx status code
+func (o *PatchEndpointIDLabelsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this patch endpoint Id labels o k response a status code equal to that given
+func (o *PatchEndpointIDLabelsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *PatchEndpointIDLabelsOK) Error() string {
+ return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsOK ", 200)
+}
+
+func (o *PatchEndpointIDLabelsOK) String() string {
+ return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsOK ", 200)
+}
+
+func (o *PatchEndpointIDLabelsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewPatchEndpointIDLabelsForbidden creates a PatchEndpointIDLabelsForbidden with default headers values
+func NewPatchEndpointIDLabelsForbidden() *PatchEndpointIDLabelsForbidden {
+ return &PatchEndpointIDLabelsForbidden{}
+}
+
+/*
+PatchEndpointIDLabelsForbidden describes a response with status code 403, with default header values.
+
+Forbidden
+*/
+type PatchEndpointIDLabelsForbidden struct {
+}
+
+// IsSuccess returns true when this patch endpoint Id labels forbidden response has a 2xx status code
+func (o *PatchEndpointIDLabelsForbidden) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this patch endpoint Id labels forbidden response has a 3xx status code
+func (o *PatchEndpointIDLabelsForbidden) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this patch endpoint Id labels forbidden response has a 4xx status code
+func (o *PatchEndpointIDLabelsForbidden) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this patch endpoint Id labels forbidden response has a 5xx status code
+func (o *PatchEndpointIDLabelsForbidden) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this patch endpoint Id labels forbidden response a status code equal to that given
+func (o *PatchEndpointIDLabelsForbidden) IsCode(code int) bool {
+ return code == 403
+}
+
+func (o *PatchEndpointIDLabelsForbidden) Error() string {
+ return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsForbidden ", 403)
+}
+
+func (o *PatchEndpointIDLabelsForbidden) String() string {
+ return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsForbidden ", 403)
+}
+
+func (o *PatchEndpointIDLabelsForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewPatchEndpointIDLabelsNotFound creates a PatchEndpointIDLabelsNotFound with default headers values
+func NewPatchEndpointIDLabelsNotFound() *PatchEndpointIDLabelsNotFound {
+ return &PatchEndpointIDLabelsNotFound{}
+}
+
+/*
+PatchEndpointIDLabelsNotFound describes a response with status code 404, with default header values.
+
+Endpoint not found
+*/
+type PatchEndpointIDLabelsNotFound struct {
+}
+
+// IsSuccess returns true when this patch endpoint Id labels not found response has a 2xx status code
+func (o *PatchEndpointIDLabelsNotFound) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this patch endpoint Id labels not found response has a 3xx status code
+func (o *PatchEndpointIDLabelsNotFound) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this patch endpoint Id labels not found response has a 4xx status code
+func (o *PatchEndpointIDLabelsNotFound) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this patch endpoint Id labels not found response has a 5xx status code
+func (o *PatchEndpointIDLabelsNotFound) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this patch endpoint Id labels not found response a status code equal to that given
+func (o *PatchEndpointIDLabelsNotFound) IsCode(code int) bool {
+ return code == 404
+}
+
+func (o *PatchEndpointIDLabelsNotFound) Error() string {
+ return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsNotFound ", 404)
+}
+
+func (o *PatchEndpointIDLabelsNotFound) String() string {
+ return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsNotFound ", 404)
+}
+
+func (o *PatchEndpointIDLabelsNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewPatchEndpointIDLabelsTooManyRequests creates a PatchEndpointIDLabelsTooManyRequests with default headers values
+func NewPatchEndpointIDLabelsTooManyRequests() *PatchEndpointIDLabelsTooManyRequests {
+ return &PatchEndpointIDLabelsTooManyRequests{}
+}
+
+/*
+PatchEndpointIDLabelsTooManyRequests describes a response with status code 429, with default header values.
+
+Rate-limiting too many requests in the given time frame
+*/
+type PatchEndpointIDLabelsTooManyRequests struct {
+}
+
+// IsSuccess returns true when this patch endpoint Id labels too many requests response has a 2xx status code
+func (o *PatchEndpointIDLabelsTooManyRequests) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this patch endpoint Id labels too many requests response has a 3xx status code
+func (o *PatchEndpointIDLabelsTooManyRequests) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this patch endpoint Id labels too many requests response has a 4xx status code
+func (o *PatchEndpointIDLabelsTooManyRequests) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this patch endpoint Id labels too many requests response has a 5xx status code
+func (o *PatchEndpointIDLabelsTooManyRequests) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this patch endpoint Id labels too many requests response a status code equal to that given
+func (o *PatchEndpointIDLabelsTooManyRequests) IsCode(code int) bool {
+ return code == 429
+}
+
+func (o *PatchEndpointIDLabelsTooManyRequests) Error() string {
+ return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsTooManyRequests ", 429)
+}
+
+func (o *PatchEndpointIDLabelsTooManyRequests) String() string {
+ return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsTooManyRequests ", 429)
+}
+
+func (o *PatchEndpointIDLabelsTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewPatchEndpointIDLabelsUpdateFailed creates a PatchEndpointIDLabelsUpdateFailed with default headers values
+func NewPatchEndpointIDLabelsUpdateFailed() *PatchEndpointIDLabelsUpdateFailed {
+ return &PatchEndpointIDLabelsUpdateFailed{}
+}
+
+/*
+PatchEndpointIDLabelsUpdateFailed describes a response with status code 500, with default header values.
+
+Error while updating labels
+*/
+type PatchEndpointIDLabelsUpdateFailed struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this patch endpoint Id labels update failed response has a 2xx status code
+func (o *PatchEndpointIDLabelsUpdateFailed) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this patch endpoint Id labels update failed response has a 3xx status code
+func (o *PatchEndpointIDLabelsUpdateFailed) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this patch endpoint Id labels update failed response has a 4xx status code
+func (o *PatchEndpointIDLabelsUpdateFailed) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this patch endpoint Id labels update failed response has a 5xx status code
+func (o *PatchEndpointIDLabelsUpdateFailed) IsServerError() bool {
+ return true
+}
+
+// IsCode returns true when this patch endpoint Id labels update failed response a status code equal to that given
+func (o *PatchEndpointIDLabelsUpdateFailed) IsCode(code int) bool {
+ return code == 500
+}
+
+func (o *PatchEndpointIDLabelsUpdateFailed) Error() string {
+ return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsUpdateFailed %+v", 500, o.Payload)
+}
+
+func (o *PatchEndpointIDLabelsUpdateFailed) String() string {
+ return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsUpdateFailed %+v", 500, o.Payload)
+}
+
+func (o *PatchEndpointIDLabelsUpdateFailed) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *PatchEndpointIDLabelsUpdateFailed) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_parameters.go
new file mode 100644
index 000000000..f718d1982
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_parameters.go
@@ -0,0 +1,189 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package endpoint
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// NewPatchEndpointIDParams creates a new PatchEndpointIDParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewPatchEndpointIDParams() *PatchEndpointIDParams {
+ return &PatchEndpointIDParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewPatchEndpointIDParamsWithTimeout creates a new PatchEndpointIDParams object
+// with the ability to set a timeout on a request.
+func NewPatchEndpointIDParamsWithTimeout(timeout time.Duration) *PatchEndpointIDParams {
+ return &PatchEndpointIDParams{
+ timeout: timeout,
+ }
+}
+
+// NewPatchEndpointIDParamsWithContext creates a new PatchEndpointIDParams object
+// with the ability to set a context for a request.
+func NewPatchEndpointIDParamsWithContext(ctx context.Context) *PatchEndpointIDParams {
+ return &PatchEndpointIDParams{
+ Context: ctx,
+ }
+}
+
+// NewPatchEndpointIDParamsWithHTTPClient creates a new PatchEndpointIDParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewPatchEndpointIDParamsWithHTTPClient(client *http.Client) *PatchEndpointIDParams {
+ return &PatchEndpointIDParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+PatchEndpointIDParams contains all the parameters to send to the API endpoint
+
+ for the patch endpoint ID operation.
+
+ Typically these are written to a http.Request.
+*/
+type PatchEndpointIDParams struct {
+
+ // Endpoint.
+ Endpoint *models.EndpointChangeRequest
+
+ /* ID.
+
+ String describing an endpoint with the format ``[prefix:]id``. If no prefix
+ is specified, a prefix of ``cilium-local:`` is assumed. Not all endpoints
+ will be addressable by all endpoint ID prefixes with the exception of the
+ local Cilium UUID which is assigned to all endpoints.
+
+ Supported endpoint id prefixes:
+ - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595
+ - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343
+ - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0
+ - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique)
+ - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique)
+ - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique)
+ - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1
+ - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444
+
+ */
+ ID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the patch endpoint ID params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *PatchEndpointIDParams) WithDefaults() *PatchEndpointIDParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the patch endpoint ID params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *PatchEndpointIDParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the patch endpoint ID params
+func (o *PatchEndpointIDParams) WithTimeout(timeout time.Duration) *PatchEndpointIDParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the patch endpoint ID params
+func (o *PatchEndpointIDParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the patch endpoint ID params
+func (o *PatchEndpointIDParams) WithContext(ctx context.Context) *PatchEndpointIDParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the patch endpoint ID params
+func (o *PatchEndpointIDParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the patch endpoint ID params
+func (o *PatchEndpointIDParams) WithHTTPClient(client *http.Client) *PatchEndpointIDParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the patch endpoint ID params
+func (o *PatchEndpointIDParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithEndpoint adds the endpoint to the patch endpoint ID params
+func (o *PatchEndpointIDParams) WithEndpoint(endpoint *models.EndpointChangeRequest) *PatchEndpointIDParams {
+ o.SetEndpoint(endpoint)
+ return o
+}
+
+// SetEndpoint adds the endpoint to the patch endpoint ID params
+func (o *PatchEndpointIDParams) SetEndpoint(endpoint *models.EndpointChangeRequest) {
+ o.Endpoint = endpoint
+}
+
+// WithID adds the id to the patch endpoint ID params
+func (o *PatchEndpointIDParams) WithID(id string) *PatchEndpointIDParams {
+ o.SetID(id)
+ return o
+}
+
+// SetID adds the id to the patch endpoint ID params
+func (o *PatchEndpointIDParams) SetID(id string) {
+ o.ID = id
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *PatchEndpointIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if o.Endpoint != nil {
+ if err := r.SetBodyParam(o.Endpoint); err != nil {
+ return err
+ }
+ }
+
+ // path param id
+ if err := r.SetPathParam("id", o.ID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_responses.go
new file mode 100644
index 000000000..7b28de272
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_responses.go
@@ -0,0 +1,394 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package endpoint
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// PatchEndpointIDReader is a Reader for the PatchEndpointID structure.
+type PatchEndpointIDReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *PatchEndpointIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewPatchEndpointIDOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewPatchEndpointIDInvalid()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 403:
+ result := NewPatchEndpointIDForbidden()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 404:
+ result := NewPatchEndpointIDNotFound()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 429:
+ result := NewPatchEndpointIDTooManyRequests()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 500:
+ result := NewPatchEndpointIDFailed()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewPatchEndpointIDOK creates a PatchEndpointIDOK with default headers values
+func NewPatchEndpointIDOK() *PatchEndpointIDOK {
+ return &PatchEndpointIDOK{}
+}
+
+/*
+PatchEndpointIDOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type PatchEndpointIDOK struct {
+}
+
+// IsSuccess returns true when this patch endpoint Id o k response has a 2xx status code
+func (o *PatchEndpointIDOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this patch endpoint Id o k response has a 3xx status code
+func (o *PatchEndpointIDOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this patch endpoint Id o k response has a 4xx status code
+func (o *PatchEndpointIDOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this patch endpoint Id o k response has a 5xx status code
+func (o *PatchEndpointIDOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this patch endpoint Id o k response a status code equal to that given
+func (o *PatchEndpointIDOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *PatchEndpointIDOK) Error() string {
+ return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdOK ", 200)
+}
+
+func (o *PatchEndpointIDOK) String() string {
+ return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdOK ", 200)
+}
+
+func (o *PatchEndpointIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewPatchEndpointIDInvalid creates a PatchEndpointIDInvalid with default headers values
+func NewPatchEndpointIDInvalid() *PatchEndpointIDInvalid {
+ return &PatchEndpointIDInvalid{}
+}
+
+/*
+PatchEndpointIDInvalid describes a response with status code 400, with default header values.
+
+Invalid modify endpoint request
+*/
+type PatchEndpointIDInvalid struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this patch endpoint Id invalid response has a 2xx status code
+func (o *PatchEndpointIDInvalid) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this patch endpoint Id invalid response has a 3xx status code
+func (o *PatchEndpointIDInvalid) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this patch endpoint Id invalid response has a 4xx status code
+func (o *PatchEndpointIDInvalid) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this patch endpoint Id invalid response has a 5xx status code
+func (o *PatchEndpointIDInvalid) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this patch endpoint Id invalid response a status code equal to that given
+func (o *PatchEndpointIDInvalid) IsCode(code int) bool {
+ return code == 400
+}
+
+func (o *PatchEndpointIDInvalid) Error() string {
+ return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdInvalid %+v", 400, o.Payload)
+}
+
+func (o *PatchEndpointIDInvalid) String() string {
+ return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdInvalid %+v", 400, o.Payload)
+}
+
+func (o *PatchEndpointIDInvalid) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *PatchEndpointIDInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewPatchEndpointIDForbidden creates a PatchEndpointIDForbidden with default headers values
+func NewPatchEndpointIDForbidden() *PatchEndpointIDForbidden {
+ return &PatchEndpointIDForbidden{}
+}
+
+/*
+PatchEndpointIDForbidden describes a response with status code 403, with default header values.
+
+Forbidden
+*/
+type PatchEndpointIDForbidden struct {
+}
+
+// IsSuccess returns true when this patch endpoint Id forbidden response has a 2xx status code
+func (o *PatchEndpointIDForbidden) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this patch endpoint Id forbidden response has a 3xx status code
+func (o *PatchEndpointIDForbidden) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this patch endpoint Id forbidden response has a 4xx status code
+func (o *PatchEndpointIDForbidden) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this patch endpoint Id forbidden response has a 5xx status code
+func (o *PatchEndpointIDForbidden) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this patch endpoint Id forbidden response a status code equal to that given
+func (o *PatchEndpointIDForbidden) IsCode(code int) bool {
+ return code == 403
+}
+
+func (o *PatchEndpointIDForbidden) Error() string {
+ return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdForbidden ", 403)
+}
+
+func (o *PatchEndpointIDForbidden) String() string {
+ return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdForbidden ", 403)
+}
+
+func (o *PatchEndpointIDForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewPatchEndpointIDNotFound creates a PatchEndpointIDNotFound with default headers values
+func NewPatchEndpointIDNotFound() *PatchEndpointIDNotFound {
+ return &PatchEndpointIDNotFound{}
+}
+
+/*
+PatchEndpointIDNotFound describes a response with status code 404, with default header values.
+
+Endpoint does not exist
+*/
+type PatchEndpointIDNotFound struct {
+}
+
+// IsSuccess returns true when this patch endpoint Id not found response has a 2xx status code
+func (o *PatchEndpointIDNotFound) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this patch endpoint Id not found response has a 3xx status code
+func (o *PatchEndpointIDNotFound) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this patch endpoint Id not found response has a 4xx status code
+func (o *PatchEndpointIDNotFound) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this patch endpoint Id not found response has a 5xx status code
+func (o *PatchEndpointIDNotFound) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this patch endpoint Id not found response a status code equal to that given
+func (o *PatchEndpointIDNotFound) IsCode(code int) bool {
+ return code == 404
+}
+
+func (o *PatchEndpointIDNotFound) Error() string {
+ return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdNotFound ", 404)
+}
+
+func (o *PatchEndpointIDNotFound) String() string {
+ return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdNotFound ", 404)
+}
+
+func (o *PatchEndpointIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewPatchEndpointIDTooManyRequests creates a PatchEndpointIDTooManyRequests with default headers values
+func NewPatchEndpointIDTooManyRequests() *PatchEndpointIDTooManyRequests {
+ return &PatchEndpointIDTooManyRequests{}
+}
+
+/*
+PatchEndpointIDTooManyRequests describes a response with status code 429, with default header values.
+
+Rate-limiting too many requests in the given time frame
+*/
+type PatchEndpointIDTooManyRequests struct {
+}
+
+// IsSuccess returns true when this patch endpoint Id too many requests response has a 2xx status code
+func (o *PatchEndpointIDTooManyRequests) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this patch endpoint Id too many requests response has a 3xx status code
+func (o *PatchEndpointIDTooManyRequests) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this patch endpoint Id too many requests response has a 4xx status code
+func (o *PatchEndpointIDTooManyRequests) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this patch endpoint Id too many requests response has a 5xx status code
+func (o *PatchEndpointIDTooManyRequests) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this patch endpoint Id too many requests response a status code equal to that given
+func (o *PatchEndpointIDTooManyRequests) IsCode(code int) bool {
+ return code == 429
+}
+
+func (o *PatchEndpointIDTooManyRequests) Error() string {
+ return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdTooManyRequests ", 429)
+}
+
+func (o *PatchEndpointIDTooManyRequests) String() string {
+ return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdTooManyRequests ", 429)
+}
+
+func (o *PatchEndpointIDTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewPatchEndpointIDFailed creates a PatchEndpointIDFailed with default headers values
+func NewPatchEndpointIDFailed() *PatchEndpointIDFailed {
+ return &PatchEndpointIDFailed{}
+}
+
+/*
+PatchEndpointIDFailed describes a response with status code 500, with default header values.
+
+Endpoint update failed
+*/
+type PatchEndpointIDFailed struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this patch endpoint Id failed response has a 2xx status code
+func (o *PatchEndpointIDFailed) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this patch endpoint Id failed response has a 3xx status code
+func (o *PatchEndpointIDFailed) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this patch endpoint Id failed response has a 4xx status code
+func (o *PatchEndpointIDFailed) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this patch endpoint Id failed response has a 5xx status code
+func (o *PatchEndpointIDFailed) IsServerError() bool {
+ return true
+}
+
+// IsCode returns true when this patch endpoint Id failed response a status code equal to that given
+func (o *PatchEndpointIDFailed) IsCode(code int) bool {
+ return code == 500
+}
+
+func (o *PatchEndpointIDFailed) Error() string {
+ return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdFailed %+v", 500, o.Payload)
+}
+
+func (o *PatchEndpointIDFailed) String() string {
+ return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdFailed %+v", 500, o.Payload)
+}
+
+func (o *PatchEndpointIDFailed) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *PatchEndpointIDFailed) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/put_endpoint_id_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/put_endpoint_id_parameters.go
new file mode 100644
index 000000000..a7342cba0
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/put_endpoint_id_parameters.go
@@ -0,0 +1,189 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package endpoint
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// NewPutEndpointIDParams creates a new PutEndpointIDParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewPutEndpointIDParams() *PutEndpointIDParams {
+ return &PutEndpointIDParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewPutEndpointIDParamsWithTimeout creates a new PutEndpointIDParams object
+// with the ability to set a timeout on a request.
+func NewPutEndpointIDParamsWithTimeout(timeout time.Duration) *PutEndpointIDParams {
+ return &PutEndpointIDParams{
+ timeout: timeout,
+ }
+}
+
+// NewPutEndpointIDParamsWithContext creates a new PutEndpointIDParams object
+// with the ability to set a context for a request.
+func NewPutEndpointIDParamsWithContext(ctx context.Context) *PutEndpointIDParams {
+ return &PutEndpointIDParams{
+ Context: ctx,
+ }
+}
+
+// NewPutEndpointIDParamsWithHTTPClient creates a new PutEndpointIDParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewPutEndpointIDParamsWithHTTPClient(client *http.Client) *PutEndpointIDParams {
+ return &PutEndpointIDParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+PutEndpointIDParams contains all the parameters to send to the API endpoint
+
+ for the put endpoint ID operation.
+
+ Typically these are written to a http.Request.
+*/
+type PutEndpointIDParams struct {
+
+ // Endpoint.
+ Endpoint *models.EndpointChangeRequest
+
+ /* ID.
+
+ String describing an endpoint with the format ``[prefix:]id``. If no prefix
+ is specified, a prefix of ``cilium-local:`` is assumed. Not all endpoints
+ will be addressable by all endpoint ID prefixes with the exception of the
+ local Cilium UUID which is assigned to all endpoints.
+
+ Supported endpoint id prefixes:
+ - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595
+ - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343
+ - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0
+ - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique)
+ - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique)
+ - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique)
+ - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1
+ - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444
+
+ */
+ ID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the put endpoint ID params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *PutEndpointIDParams) WithDefaults() *PutEndpointIDParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the put endpoint ID params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *PutEndpointIDParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the put endpoint ID params
+func (o *PutEndpointIDParams) WithTimeout(timeout time.Duration) *PutEndpointIDParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the put endpoint ID params
+func (o *PutEndpointIDParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the put endpoint ID params
+func (o *PutEndpointIDParams) WithContext(ctx context.Context) *PutEndpointIDParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the put endpoint ID params
+func (o *PutEndpointIDParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the put endpoint ID params
+func (o *PutEndpointIDParams) WithHTTPClient(client *http.Client) *PutEndpointIDParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the put endpoint ID params
+func (o *PutEndpointIDParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithEndpoint adds the endpoint to the put endpoint ID params
+func (o *PutEndpointIDParams) WithEndpoint(endpoint *models.EndpointChangeRequest) *PutEndpointIDParams {
+ o.SetEndpoint(endpoint)
+ return o
+}
+
+// SetEndpoint adds the endpoint to the put endpoint ID params
+func (o *PutEndpointIDParams) SetEndpoint(endpoint *models.EndpointChangeRequest) {
+ o.Endpoint = endpoint
+}
+
+// WithID adds the id to the put endpoint ID params
+func (o *PutEndpointIDParams) WithID(id string) *PutEndpointIDParams {
+ o.SetID(id)
+ return o
+}
+
+// SetID adds the id to the put endpoint ID params
+func (o *PutEndpointIDParams) SetID(id string) {
+ o.ID = id
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *PutEndpointIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if o.Endpoint != nil {
+ if err := r.SetBodyParam(o.Endpoint); err != nil {
+ return err
+ }
+ }
+
+ // path param id
+ if err := r.SetPathParam("id", o.ID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/put_endpoint_id_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/put_endpoint_id_responses.go
new file mode 100644
index 000000000..97147e563
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/put_endpoint_id_responses.go
@@ -0,0 +1,394 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package endpoint
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// PutEndpointIDReader is a Reader for the PutEndpointID structure.
+type PutEndpointIDReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *PutEndpointIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 201:
+ result := NewPutEndpointIDCreated()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewPutEndpointIDInvalid()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 403:
+ result := NewPutEndpointIDForbidden()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 409:
+ result := NewPutEndpointIDExists()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 429:
+ result := NewPutEndpointIDTooManyRequests()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 500:
+ result := NewPutEndpointIDFailed()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewPutEndpointIDCreated creates a PutEndpointIDCreated with default headers values
+func NewPutEndpointIDCreated() *PutEndpointIDCreated {
+ return &PutEndpointIDCreated{}
+}
+
+/*
+PutEndpointIDCreated describes a response with status code 201, with default header values.
+
+Created
+*/
+type PutEndpointIDCreated struct {
+}
+
+// IsSuccess returns true when this put endpoint Id created response has a 2xx status code
+func (o *PutEndpointIDCreated) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this put endpoint Id created response has a 3xx status code
+func (o *PutEndpointIDCreated) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this put endpoint Id created response has a 4xx status code
+func (o *PutEndpointIDCreated) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this put endpoint Id created response has a 5xx status code
+func (o *PutEndpointIDCreated) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this put endpoint Id created response a status code equal to that given
+func (o *PutEndpointIDCreated) IsCode(code int) bool {
+ return code == 201
+}
+
+func (o *PutEndpointIDCreated) Error() string {
+ return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdCreated ", 201)
+}
+
+func (o *PutEndpointIDCreated) String() string {
+ return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdCreated ", 201)
+}
+
+func (o *PutEndpointIDCreated) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewPutEndpointIDInvalid creates a PutEndpointIDInvalid with default headers values
+func NewPutEndpointIDInvalid() *PutEndpointIDInvalid {
+ return &PutEndpointIDInvalid{}
+}
+
+/*
+PutEndpointIDInvalid describes a response with status code 400, with default header values.
+
+Invalid endpoint in request
+*/
+type PutEndpointIDInvalid struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this put endpoint Id invalid response has a 2xx status code
+func (o *PutEndpointIDInvalid) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this put endpoint Id invalid response has a 3xx status code
+func (o *PutEndpointIDInvalid) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this put endpoint Id invalid response has a 4xx status code
+func (o *PutEndpointIDInvalid) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this put endpoint Id invalid response has a 5xx status code
+func (o *PutEndpointIDInvalid) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this put endpoint Id invalid response a status code equal to that given
+func (o *PutEndpointIDInvalid) IsCode(code int) bool {
+ return code == 400
+}
+
+func (o *PutEndpointIDInvalid) Error() string {
+ return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdInvalid %+v", 400, o.Payload)
+}
+
+func (o *PutEndpointIDInvalid) String() string {
+ return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdInvalid %+v", 400, o.Payload)
+}
+
+func (o *PutEndpointIDInvalid) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *PutEndpointIDInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewPutEndpointIDForbidden creates a PutEndpointIDForbidden with default headers values
+func NewPutEndpointIDForbidden() *PutEndpointIDForbidden {
+ return &PutEndpointIDForbidden{}
+}
+
+/*
+PutEndpointIDForbidden describes a response with status code 403, with default header values.
+
+Forbidden
+*/
+type PutEndpointIDForbidden struct {
+}
+
+// IsSuccess returns true when this put endpoint Id forbidden response has a 2xx status code
+func (o *PutEndpointIDForbidden) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this put endpoint Id forbidden response has a 3xx status code
+func (o *PutEndpointIDForbidden) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this put endpoint Id forbidden response has a 4xx status code
+func (o *PutEndpointIDForbidden) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this put endpoint Id forbidden response has a 5xx status code
+func (o *PutEndpointIDForbidden) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this put endpoint Id forbidden response a status code equal to that given
+func (o *PutEndpointIDForbidden) IsCode(code int) bool {
+ return code == 403
+}
+
+func (o *PutEndpointIDForbidden) Error() string {
+ return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdForbidden ", 403)
+}
+
+func (o *PutEndpointIDForbidden) String() string {
+ return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdForbidden ", 403)
+}
+
+func (o *PutEndpointIDForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewPutEndpointIDExists creates a PutEndpointIDExists with default headers values
+func NewPutEndpointIDExists() *PutEndpointIDExists {
+ return &PutEndpointIDExists{}
+}
+
+/*
+PutEndpointIDExists describes a response with status code 409, with default header values.
+
+Endpoint already exists
+*/
+type PutEndpointIDExists struct {
+}
+
+// IsSuccess returns true when this put endpoint Id exists response has a 2xx status code
+func (o *PutEndpointIDExists) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this put endpoint Id exists response has a 3xx status code
+func (o *PutEndpointIDExists) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this put endpoint Id exists response has a 4xx status code
+func (o *PutEndpointIDExists) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this put endpoint Id exists response has a 5xx status code
+func (o *PutEndpointIDExists) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this put endpoint Id exists response a status code equal to that given
+func (o *PutEndpointIDExists) IsCode(code int) bool {
+ return code == 409
+}
+
+func (o *PutEndpointIDExists) Error() string {
+ return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdExists ", 409)
+}
+
+func (o *PutEndpointIDExists) String() string {
+ return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdExists ", 409)
+}
+
+func (o *PutEndpointIDExists) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewPutEndpointIDTooManyRequests creates a PutEndpointIDTooManyRequests with default headers values
+func NewPutEndpointIDTooManyRequests() *PutEndpointIDTooManyRequests {
+ return &PutEndpointIDTooManyRequests{}
+}
+
+/*
+PutEndpointIDTooManyRequests describes a response with status code 429, with default header values.
+
+Rate-limiting too many requests in the given time frame
+*/
+type PutEndpointIDTooManyRequests struct {
+}
+
+// IsSuccess returns true when this put endpoint Id too many requests response has a 2xx status code
+func (o *PutEndpointIDTooManyRequests) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this put endpoint Id too many requests response has a 3xx status code
+func (o *PutEndpointIDTooManyRequests) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this put endpoint Id too many requests response has a 4xx status code
+func (o *PutEndpointIDTooManyRequests) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this put endpoint Id too many requests response has a 5xx status code
+func (o *PutEndpointIDTooManyRequests) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this put endpoint Id too many requests response a status code equal to that given
+func (o *PutEndpointIDTooManyRequests) IsCode(code int) bool {
+ return code == 429
+}
+
+func (o *PutEndpointIDTooManyRequests) Error() string {
+ return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdTooManyRequests ", 429)
+}
+
+func (o *PutEndpointIDTooManyRequests) String() string {
+ return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdTooManyRequests ", 429)
+}
+
+func (o *PutEndpointIDTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewPutEndpointIDFailed creates a PutEndpointIDFailed with default headers values
+func NewPutEndpointIDFailed() *PutEndpointIDFailed {
+ return &PutEndpointIDFailed{}
+}
+
+/*
+PutEndpointIDFailed describes a response with status code 500, with default header values.
+
+Endpoint creation failed
+*/
+type PutEndpointIDFailed struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this put endpoint Id failed response has a 2xx status code
+func (o *PutEndpointIDFailed) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this put endpoint Id failed response has a 3xx status code
+func (o *PutEndpointIDFailed) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this put endpoint Id failed response has a 4xx status code
+func (o *PutEndpointIDFailed) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this put endpoint Id failed response has a 5xx status code
+func (o *PutEndpointIDFailed) IsServerError() bool {
+ return true
+}
+
+// IsCode returns true when this put endpoint Id failed response a status code equal to that given
+func (o *PutEndpointIDFailed) IsCode(code int) bool {
+ return code == 500
+}
+
+func (o *PutEndpointIDFailed) Error() string {
+ return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdFailed %+v", 500, o.Payload)
+}
+
+func (o *PutEndpointIDFailed) String() string {
+ return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdFailed %+v", 500, o.Payload)
+}
+
+func (o *PutEndpointIDFailed) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *PutEndpointIDFailed) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/ipam/delete_ipam_ip_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/ipam/delete_ipam_ip_parameters.go
new file mode 100644
index 000000000..73cc82d51
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/ipam/delete_ipam_ip_parameters.go
@@ -0,0 +1,185 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package ipam
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewDeleteIpamIPParams creates a new DeleteIpamIPParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewDeleteIpamIPParams() *DeleteIpamIPParams {
+ return &DeleteIpamIPParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewDeleteIpamIPParamsWithTimeout creates a new DeleteIpamIPParams object
+// with the ability to set a timeout on a request.
+func NewDeleteIpamIPParamsWithTimeout(timeout time.Duration) *DeleteIpamIPParams {
+ return &DeleteIpamIPParams{
+ timeout: timeout,
+ }
+}
+
+// NewDeleteIpamIPParamsWithContext creates a new DeleteIpamIPParams object
+// with the ability to set a context for a request.
+func NewDeleteIpamIPParamsWithContext(ctx context.Context) *DeleteIpamIPParams {
+ return &DeleteIpamIPParams{
+ Context: ctx,
+ }
+}
+
+// NewDeleteIpamIPParamsWithHTTPClient creates a new DeleteIpamIPParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewDeleteIpamIPParamsWithHTTPClient(client *http.Client) *DeleteIpamIPParams {
+ return &DeleteIpamIPParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+DeleteIpamIPParams contains all the parameters to send to the API endpoint
+
+ for the delete ipam IP operation.
+
+ Typically these are written to a http.Request.
+*/
+type DeleteIpamIPParams struct {
+
+ /* IP.
+
+ IP address
+ */
+ IP string
+
+ // Pool.
+ Pool *string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the delete ipam IP params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteIpamIPParams) WithDefaults() *DeleteIpamIPParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the delete ipam IP params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteIpamIPParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the delete ipam IP params
+func (o *DeleteIpamIPParams) WithTimeout(timeout time.Duration) *DeleteIpamIPParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the delete ipam IP params
+func (o *DeleteIpamIPParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the delete ipam IP params
+func (o *DeleteIpamIPParams) WithContext(ctx context.Context) *DeleteIpamIPParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the delete ipam IP params
+func (o *DeleteIpamIPParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the delete ipam IP params
+func (o *DeleteIpamIPParams) WithHTTPClient(client *http.Client) *DeleteIpamIPParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the delete ipam IP params
+func (o *DeleteIpamIPParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithIP adds the ip to the delete ipam IP params
+func (o *DeleteIpamIPParams) WithIP(ip string) *DeleteIpamIPParams {
+ o.SetIP(ip)
+ return o
+}
+
+// SetIP adds the ip to the delete ipam IP params
+func (o *DeleteIpamIPParams) SetIP(ip string) {
+ o.IP = ip
+}
+
+// WithPool adds the pool to the delete ipam IP params
+func (o *DeleteIpamIPParams) WithPool(pool *string) *DeleteIpamIPParams {
+ o.SetPool(pool)
+ return o
+}
+
+// SetPool adds the pool to the delete ipam IP params
+func (o *DeleteIpamIPParams) SetPool(pool *string) {
+ o.Pool = pool
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *DeleteIpamIPParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param ip
+ if err := r.SetPathParam("ip", o.IP); err != nil {
+ return err
+ }
+
+ if o.Pool != nil {
+
+ // query param pool
+ var qrPool string
+
+ if o.Pool != nil {
+ qrPool = *o.Pool
+ }
+ qPool := qrPool
+ if qPool != "" {
+
+ if err := r.SetQueryParam("pool", qPool); err != nil {
+ return err
+ }
+ }
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/ipam/delete_ipam_ip_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/ipam/delete_ipam_ip_responses.go
new file mode 100644
index 000000000..ae095b679
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/ipam/delete_ipam_ip_responses.go
@@ -0,0 +1,384 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package ipam
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// DeleteIpamIPReader is a Reader for the DeleteIpamIP structure.
+type DeleteIpamIPReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *DeleteIpamIPReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewDeleteIpamIPOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewDeleteIpamIPInvalid()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 403:
+ result := NewDeleteIpamIPForbidden()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 404:
+ result := NewDeleteIpamIPNotFound()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 500:
+ result := NewDeleteIpamIPFailure()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 501:
+ result := NewDeleteIpamIPDisabled()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewDeleteIpamIPOK creates a DeleteIpamIPOK with default headers values
+func NewDeleteIpamIPOK() *DeleteIpamIPOK {
+ return &DeleteIpamIPOK{}
+}
+
+/*
+DeleteIpamIPOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type DeleteIpamIPOK struct {
+}
+
+// IsSuccess returns true when this delete ipam Ip o k response has a 2xx status code
+func (o *DeleteIpamIPOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this delete ipam Ip o k response has a 3xx status code
+func (o *DeleteIpamIPOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete ipam Ip o k response has a 4xx status code
+func (o *DeleteIpamIPOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this delete ipam Ip o k response has a 5xx status code
+func (o *DeleteIpamIPOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this delete ipam Ip o k response a status code equal to that given
+func (o *DeleteIpamIPOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *DeleteIpamIPOK) Error() string {
+ return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpOK ", 200)
+}
+
+func (o *DeleteIpamIPOK) String() string {
+ return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpOK ", 200)
+}
+
+func (o *DeleteIpamIPOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewDeleteIpamIPInvalid creates a DeleteIpamIPInvalid with default headers values
+func NewDeleteIpamIPInvalid() *DeleteIpamIPInvalid {
+ return &DeleteIpamIPInvalid{}
+}
+
+/*
+DeleteIpamIPInvalid describes a response with status code 400, with default header values.
+
+Invalid IP address
+*/
+type DeleteIpamIPInvalid struct {
+}
+
+// IsSuccess returns true when this delete ipam Ip invalid response has a 2xx status code
+func (o *DeleteIpamIPInvalid) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this delete ipam Ip invalid response has a 3xx status code
+func (o *DeleteIpamIPInvalid) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete ipam Ip invalid response has a 4xx status code
+func (o *DeleteIpamIPInvalid) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this delete ipam Ip invalid response has a 5xx status code
+func (o *DeleteIpamIPInvalid) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this delete ipam Ip invalid response a status code equal to that given
+func (o *DeleteIpamIPInvalid) IsCode(code int) bool {
+ return code == 400
+}
+
+func (o *DeleteIpamIPInvalid) Error() string {
+ return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpInvalid ", 400)
+}
+
+func (o *DeleteIpamIPInvalid) String() string {
+ return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpInvalid ", 400)
+}
+
+func (o *DeleteIpamIPInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewDeleteIpamIPForbidden creates a DeleteIpamIPForbidden with default headers values
+func NewDeleteIpamIPForbidden() *DeleteIpamIPForbidden {
+ return &DeleteIpamIPForbidden{}
+}
+
+/*
+DeleteIpamIPForbidden describes a response with status code 403, with default header values.
+
+Forbidden
+*/
+type DeleteIpamIPForbidden struct {
+}
+
+// IsSuccess returns true when this delete ipam Ip forbidden response has a 2xx status code
+func (o *DeleteIpamIPForbidden) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this delete ipam Ip forbidden response has a 3xx status code
+func (o *DeleteIpamIPForbidden) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete ipam Ip forbidden response has a 4xx status code
+func (o *DeleteIpamIPForbidden) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this delete ipam Ip forbidden response has a 5xx status code
+func (o *DeleteIpamIPForbidden) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this delete ipam Ip forbidden response a status code equal to that given
+func (o *DeleteIpamIPForbidden) IsCode(code int) bool {
+ return code == 403
+}
+
+func (o *DeleteIpamIPForbidden) Error() string {
+ return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpForbidden ", 403)
+}
+
+func (o *DeleteIpamIPForbidden) String() string {
+ return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpForbidden ", 403)
+}
+
+func (o *DeleteIpamIPForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewDeleteIpamIPNotFound creates a DeleteIpamIPNotFound with default headers values
+func NewDeleteIpamIPNotFound() *DeleteIpamIPNotFound {
+ return &DeleteIpamIPNotFound{}
+}
+
+/*
+DeleteIpamIPNotFound describes a response with status code 404, with default header values.
+
+IP address not found
+*/
+type DeleteIpamIPNotFound struct {
+}
+
+// IsSuccess returns true when this delete ipam Ip not found response has a 2xx status code
+func (o *DeleteIpamIPNotFound) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this delete ipam Ip not found response has a 3xx status code
+func (o *DeleteIpamIPNotFound) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete ipam Ip not found response has a 4xx status code
+func (o *DeleteIpamIPNotFound) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this delete ipam Ip not found response has a 5xx status code
+func (o *DeleteIpamIPNotFound) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this delete ipam Ip not found response a status code equal to that given
+func (o *DeleteIpamIPNotFound) IsCode(code int) bool {
+ return code == 404
+}
+
+func (o *DeleteIpamIPNotFound) Error() string {
+ return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpNotFound ", 404)
+}
+
+func (o *DeleteIpamIPNotFound) String() string {
+ return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpNotFound ", 404)
+}
+
+func (o *DeleteIpamIPNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewDeleteIpamIPFailure creates a DeleteIpamIPFailure with default headers values
+func NewDeleteIpamIPFailure() *DeleteIpamIPFailure {
+ return &DeleteIpamIPFailure{}
+}
+
+/*
+DeleteIpamIPFailure describes a response with status code 500, with default header values.
+
+Address release failure
+*/
+type DeleteIpamIPFailure struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this delete ipam Ip failure response has a 2xx status code
+func (o *DeleteIpamIPFailure) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this delete ipam Ip failure response has a 3xx status code
+func (o *DeleteIpamIPFailure) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete ipam Ip failure response has a 4xx status code
+func (o *DeleteIpamIPFailure) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this delete ipam Ip failure response has a 5xx status code
+func (o *DeleteIpamIPFailure) IsServerError() bool {
+ return true
+}
+
+// IsCode returns true when this delete ipam Ip failure response a status code equal to that given
+func (o *DeleteIpamIPFailure) IsCode(code int) bool {
+ return code == 500
+}
+
+func (o *DeleteIpamIPFailure) Error() string {
+ return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpFailure %+v", 500, o.Payload)
+}
+
+func (o *DeleteIpamIPFailure) String() string {
+ return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpFailure %+v", 500, o.Payload)
+}
+
+func (o *DeleteIpamIPFailure) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *DeleteIpamIPFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewDeleteIpamIPDisabled creates a DeleteIpamIPDisabled with default headers values
+func NewDeleteIpamIPDisabled() *DeleteIpamIPDisabled {
+ return &DeleteIpamIPDisabled{}
+}
+
+/*
+DeleteIpamIPDisabled describes a response with status code 501, with default header values.
+
+Allocation for address family disabled
+*/
+type DeleteIpamIPDisabled struct {
+}
+
+// IsSuccess returns true when this delete ipam Ip disabled response has a 2xx status code
+func (o *DeleteIpamIPDisabled) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this delete ipam Ip disabled response has a 3xx status code
+func (o *DeleteIpamIPDisabled) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete ipam Ip disabled response has a 4xx status code
+func (o *DeleteIpamIPDisabled) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this delete ipam Ip disabled response has a 5xx status code
+func (o *DeleteIpamIPDisabled) IsServerError() bool {
+ return true
+}
+
+// IsCode returns true when this delete ipam Ip disabled response a status code equal to that given
+func (o *DeleteIpamIPDisabled) IsCode(code int) bool {
+ return code == 501
+}
+
+func (o *DeleteIpamIPDisabled) Error() string {
+ return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpDisabled ", 501)
+}
+
+func (o *DeleteIpamIPDisabled) String() string {
+ return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpDisabled ", 501)
+}
+
+func (o *DeleteIpamIPDisabled) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/ipam/ipam_client.go b/vendor/github.com/cilium/cilium/api/v1/client/ipam/ipam_client.go
new file mode 100644
index 000000000..bff4193d6
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/ipam/ipam_client.go
@@ -0,0 +1,162 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package ipam
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+)
+
+// New creates a new ipam API client.
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
+ return &Client{transport: transport, formats: formats}
+}
+
+/*
+Client for ipam API
+*/
+type Client struct {
+ transport runtime.ClientTransport
+ formats strfmt.Registry
+}
+
+// ClientOption is the option for Client methods
+type ClientOption func(*runtime.ClientOperation)
+
+// ClientService is the interface for Client methods
+type ClientService interface {
+ DeleteIpamIP(params *DeleteIpamIPParams, opts ...ClientOption) (*DeleteIpamIPOK, error)
+
+ PostIpam(params *PostIpamParams, opts ...ClientOption) (*PostIpamCreated, error)
+
+ PostIpamIP(params *PostIpamIPParams, opts ...ClientOption) (*PostIpamIPOK, error)
+
+ SetTransport(transport runtime.ClientTransport)
+}
+
+/*
+DeleteIpamIP releases an allocated IP address
+*/
+func (a *Client) DeleteIpamIP(params *DeleteIpamIPParams, opts ...ClientOption) (*DeleteIpamIPOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewDeleteIpamIPParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "DeleteIpamIP",
+ Method: "DELETE",
+ PathPattern: "/ipam/{ip}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &DeleteIpamIPReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*DeleteIpamIPOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for DeleteIpamIP: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+PostIpam allocates an IP address
+*/
+func (a *Client) PostIpam(params *PostIpamParams, opts ...ClientOption) (*PostIpamCreated, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewPostIpamParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "PostIpam",
+ Method: "POST",
+ PathPattern: "/ipam",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &PostIpamReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*PostIpamCreated)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for PostIpam: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+PostIpamIP allocates an IP address
+*/
+func (a *Client) PostIpamIP(params *PostIpamIPParams, opts ...ClientOption) (*PostIpamIPOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewPostIpamIPParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "PostIpamIP",
+ Method: "POST",
+ PathPattern: "/ipam/{ip}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &PostIpamIPReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*PostIpamIPOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for PostIpamIP: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+ a.transport = transport
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/ipam/post_ipam_ip_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/ipam/post_ipam_ip_parameters.go
new file mode 100644
index 000000000..6dbf02985
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/ipam/post_ipam_ip_parameters.go
@@ -0,0 +1,216 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package ipam
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewPostIpamIPParams creates a new PostIpamIPParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewPostIpamIPParams() *PostIpamIPParams {
+ return &PostIpamIPParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewPostIpamIPParamsWithTimeout creates a new PostIpamIPParams object
+// with the ability to set a timeout on a request.
+func NewPostIpamIPParamsWithTimeout(timeout time.Duration) *PostIpamIPParams {
+ return &PostIpamIPParams{
+ timeout: timeout,
+ }
+}
+
+// NewPostIpamIPParamsWithContext creates a new PostIpamIPParams object
+// with the ability to set a context for a request.
+func NewPostIpamIPParamsWithContext(ctx context.Context) *PostIpamIPParams {
+ return &PostIpamIPParams{
+ Context: ctx,
+ }
+}
+
+// NewPostIpamIPParamsWithHTTPClient creates a new PostIpamIPParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewPostIpamIPParamsWithHTTPClient(client *http.Client) *PostIpamIPParams {
+ return &PostIpamIPParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+PostIpamIPParams contains all the parameters to send to the API endpoint
+
+ for the post ipam IP operation.
+
+ Typically these are written to a http.Request.
+*/
+type PostIpamIPParams struct {
+
+ /* IP.
+
+ IP address
+ */
+ IP string
+
+ // Owner.
+ Owner *string
+
+ // Pool.
+ Pool *string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the post ipam IP params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *PostIpamIPParams) WithDefaults() *PostIpamIPParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the post ipam IP params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *PostIpamIPParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the post ipam IP params
+func (o *PostIpamIPParams) WithTimeout(timeout time.Duration) *PostIpamIPParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the post ipam IP params
+func (o *PostIpamIPParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the post ipam IP params
+func (o *PostIpamIPParams) WithContext(ctx context.Context) *PostIpamIPParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the post ipam IP params
+func (o *PostIpamIPParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the post ipam IP params
+func (o *PostIpamIPParams) WithHTTPClient(client *http.Client) *PostIpamIPParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the post ipam IP params
+func (o *PostIpamIPParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithIP adds the ip to the post ipam IP params
+func (o *PostIpamIPParams) WithIP(ip string) *PostIpamIPParams {
+ o.SetIP(ip)
+ return o
+}
+
+// SetIP adds the ip to the post ipam IP params
+func (o *PostIpamIPParams) SetIP(ip string) {
+ o.IP = ip
+}
+
+// WithOwner adds the owner to the post ipam IP params
+func (o *PostIpamIPParams) WithOwner(owner *string) *PostIpamIPParams {
+ o.SetOwner(owner)
+ return o
+}
+
+// SetOwner adds the owner to the post ipam IP params
+func (o *PostIpamIPParams) SetOwner(owner *string) {
+ o.Owner = owner
+}
+
+// WithPool adds the pool to the post ipam IP params
+func (o *PostIpamIPParams) WithPool(pool *string) *PostIpamIPParams {
+ o.SetPool(pool)
+ return o
+}
+
+// SetPool adds the pool to the post ipam IP params
+func (o *PostIpamIPParams) SetPool(pool *string) {
+ o.Pool = pool
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *PostIpamIPParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param ip
+ if err := r.SetPathParam("ip", o.IP); err != nil {
+ return err
+ }
+
+ if o.Owner != nil {
+
+ // query param owner
+ var qrOwner string
+
+ if o.Owner != nil {
+ qrOwner = *o.Owner
+ }
+ qOwner := qrOwner
+ if qOwner != "" {
+
+ if err := r.SetQueryParam("owner", qOwner); err != nil {
+ return err
+ }
+ }
+ }
+
+ if o.Pool != nil {
+
+ // query param pool
+ var qrPool string
+
+ if o.Pool != nil {
+ qrPool = *o.Pool
+ }
+ qPool := qrPool
+ if qPool != "" {
+
+ if err := r.SetQueryParam("pool", qPool); err != nil {
+ return err
+ }
+ }
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/ipam/post_ipam_ip_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/ipam/post_ipam_ip_responses.go
new file mode 100644
index 000000000..1c8cff834
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/ipam/post_ipam_ip_responses.go
@@ -0,0 +1,384 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package ipam
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// PostIpamIPReader is a Reader for the PostIpamIP structure.
+type PostIpamIPReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *PostIpamIPReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewPostIpamIPOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewPostIpamIPInvalid()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 403:
+ result := NewPostIpamIPForbidden()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 409:
+ result := NewPostIpamIPExists()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 500:
+ result := NewPostIpamIPFailure()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 501:
+ result := NewPostIpamIPDisabled()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewPostIpamIPOK creates a PostIpamIPOK with default headers values
+func NewPostIpamIPOK() *PostIpamIPOK {
+ return &PostIpamIPOK{}
+}
+
+/*
+PostIpamIPOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type PostIpamIPOK struct {
+}
+
+// IsSuccess returns true when this post ipam Ip o k response has a 2xx status code
+func (o *PostIpamIPOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this post ipam Ip o k response has a 3xx status code
+func (o *PostIpamIPOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this post ipam Ip o k response has a 4xx status code
+func (o *PostIpamIPOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this post ipam Ip o k response has a 5xx status code
+func (o *PostIpamIPOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this post ipam Ip o k response a status code equal to that given
+func (o *PostIpamIPOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *PostIpamIPOK) Error() string {
+ return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpOK ", 200)
+}
+
+func (o *PostIpamIPOK) String() string {
+ return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpOK ", 200)
+}
+
+func (o *PostIpamIPOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewPostIpamIPInvalid creates a PostIpamIPInvalid with default headers values
+func NewPostIpamIPInvalid() *PostIpamIPInvalid {
+ return &PostIpamIPInvalid{}
+}
+
+/*
+PostIpamIPInvalid describes a response with status code 400, with default header values.
+
+Invalid IP address
+*/
+type PostIpamIPInvalid struct {
+}
+
+// IsSuccess returns true when this post ipam Ip invalid response has a 2xx status code
+func (o *PostIpamIPInvalid) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this post ipam Ip invalid response has a 3xx status code
+func (o *PostIpamIPInvalid) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this post ipam Ip invalid response has a 4xx status code
+func (o *PostIpamIPInvalid) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this post ipam Ip invalid response has a 5xx status code
+func (o *PostIpamIPInvalid) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this post ipam Ip invalid response a status code equal to that given
+func (o *PostIpamIPInvalid) IsCode(code int) bool {
+ return code == 400
+}
+
+func (o *PostIpamIPInvalid) Error() string {
+ return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpInvalid ", 400)
+}
+
+func (o *PostIpamIPInvalid) String() string {
+ return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpInvalid ", 400)
+}
+
+func (o *PostIpamIPInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewPostIpamIPForbidden creates a PostIpamIPForbidden with default headers values
+func NewPostIpamIPForbidden() *PostIpamIPForbidden {
+ return &PostIpamIPForbidden{}
+}
+
+/*
+PostIpamIPForbidden describes a response with status code 403, with default header values.
+
+Forbidden
+*/
+type PostIpamIPForbidden struct {
+}
+
+// IsSuccess returns true when this post ipam Ip forbidden response has a 2xx status code
+func (o *PostIpamIPForbidden) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this post ipam Ip forbidden response has a 3xx status code
+func (o *PostIpamIPForbidden) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this post ipam Ip forbidden response has a 4xx status code
+func (o *PostIpamIPForbidden) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this post ipam Ip forbidden response has a 5xx status code
+func (o *PostIpamIPForbidden) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this post ipam Ip forbidden response a status code equal to that given
+func (o *PostIpamIPForbidden) IsCode(code int) bool {
+ return code == 403
+}
+
+func (o *PostIpamIPForbidden) Error() string {
+ return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpForbidden ", 403)
+}
+
+func (o *PostIpamIPForbidden) String() string {
+ return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpForbidden ", 403)
+}
+
+func (o *PostIpamIPForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewPostIpamIPExists creates a PostIpamIPExists with default headers values
+func NewPostIpamIPExists() *PostIpamIPExists {
+ return &PostIpamIPExists{}
+}
+
+/*
+PostIpamIPExists describes a response with status code 409, with default header values.
+
+IP already allocated
+*/
+type PostIpamIPExists struct {
+}
+
+// IsSuccess returns true when this post ipam Ip exists response has a 2xx status code
+func (o *PostIpamIPExists) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this post ipam Ip exists response has a 3xx status code
+func (o *PostIpamIPExists) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this post ipam Ip exists response has a 4xx status code
+func (o *PostIpamIPExists) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this post ipam Ip exists response has a 5xx status code
+func (o *PostIpamIPExists) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this post ipam Ip exists response a status code equal to that given
+func (o *PostIpamIPExists) IsCode(code int) bool {
+ return code == 409
+}
+
+func (o *PostIpamIPExists) Error() string {
+ return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpExists ", 409)
+}
+
+func (o *PostIpamIPExists) String() string {
+ return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpExists ", 409)
+}
+
+func (o *PostIpamIPExists) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewPostIpamIPFailure creates a PostIpamIPFailure with default headers values
+func NewPostIpamIPFailure() *PostIpamIPFailure {
+ return &PostIpamIPFailure{}
+}
+
+/*
+PostIpamIPFailure describes a response with status code 500, with default header values.
+
+IP allocation failure. Details in message.
+*/
+type PostIpamIPFailure struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this post ipam Ip failure response has a 2xx status code
+func (o *PostIpamIPFailure) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this post ipam Ip failure response has a 3xx status code
+func (o *PostIpamIPFailure) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this post ipam Ip failure response has a 4xx status code
+func (o *PostIpamIPFailure) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this post ipam Ip failure response has a 5xx status code
+func (o *PostIpamIPFailure) IsServerError() bool {
+ return true
+}
+
+// IsCode returns true when this post ipam Ip failure response a status code equal to that given
+func (o *PostIpamIPFailure) IsCode(code int) bool {
+ return code == 500
+}
+
+func (o *PostIpamIPFailure) Error() string {
+ return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpFailure %+v", 500, o.Payload)
+}
+
+func (o *PostIpamIPFailure) String() string {
+ return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpFailure %+v", 500, o.Payload)
+}
+
+func (o *PostIpamIPFailure) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *PostIpamIPFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewPostIpamIPDisabled creates a PostIpamIPDisabled with default headers values
+func NewPostIpamIPDisabled() *PostIpamIPDisabled {
+ return &PostIpamIPDisabled{}
+}
+
+/*
+PostIpamIPDisabled describes a response with status code 501, with default header values.
+
+Allocation for address family disabled
+*/
+type PostIpamIPDisabled struct {
+}
+
+// IsSuccess returns true when this post ipam Ip disabled response has a 2xx status code
+func (o *PostIpamIPDisabled) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this post ipam Ip disabled response has a 3xx status code
+func (o *PostIpamIPDisabled) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this post ipam Ip disabled response has a 4xx status code
+func (o *PostIpamIPDisabled) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this post ipam Ip disabled response has a 5xx status code
+func (o *PostIpamIPDisabled) IsServerError() bool {
+ return true
+}
+
+// IsCode returns true when this post ipam Ip disabled response a status code equal to that given
+func (o *PostIpamIPDisabled) IsCode(code int) bool {
+ return code == 501
+}
+
+func (o *PostIpamIPDisabled) Error() string {
+ return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpDisabled ", 501)
+}
+
+func (o *PostIpamIPDisabled) String() string {
+ return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpDisabled ", 501)
+}
+
+func (o *PostIpamIPDisabled) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/ipam/post_ipam_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/ipam/post_ipam_parameters.go
new file mode 100644
index 000000000..8e2075849
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/ipam/post_ipam_parameters.go
@@ -0,0 +1,248 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package ipam
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// NewPostIpamParams creates a new PostIpamParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewPostIpamParams() *PostIpamParams {
+ return &PostIpamParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewPostIpamParamsWithTimeout creates a new PostIpamParams object
+// with the ability to set a timeout on a request.
+func NewPostIpamParamsWithTimeout(timeout time.Duration) *PostIpamParams {
+ return &PostIpamParams{
+ timeout: timeout,
+ }
+}
+
+// NewPostIpamParamsWithContext creates a new PostIpamParams object
+// with the ability to set a context for a request.
+func NewPostIpamParamsWithContext(ctx context.Context) *PostIpamParams {
+ return &PostIpamParams{
+ Context: ctx,
+ }
+}
+
+// NewPostIpamParamsWithHTTPClient creates a new PostIpamParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewPostIpamParamsWithHTTPClient(client *http.Client) *PostIpamParams {
+ return &PostIpamParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+PostIpamParams contains all the parameters to send to the API endpoint
+
+ for the post ipam operation.
+
+ Typically these are written to a http.Request.
+*/
+type PostIpamParams struct {
+
+ // Expiration.
+ Expiration *bool
+
+ // Family.
+ Family *string
+
+ // Owner.
+ Owner *string
+
+ // Pool.
+ Pool *string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the post ipam params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *PostIpamParams) WithDefaults() *PostIpamParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the post ipam params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *PostIpamParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the post ipam params
+func (o *PostIpamParams) WithTimeout(timeout time.Duration) *PostIpamParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the post ipam params
+func (o *PostIpamParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the post ipam params
+func (o *PostIpamParams) WithContext(ctx context.Context) *PostIpamParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the post ipam params
+func (o *PostIpamParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the post ipam params
+func (o *PostIpamParams) WithHTTPClient(client *http.Client) *PostIpamParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the post ipam params
+func (o *PostIpamParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithExpiration adds the expiration to the post ipam params
+func (o *PostIpamParams) WithExpiration(expiration *bool) *PostIpamParams {
+ o.SetExpiration(expiration)
+ return o
+}
+
+// SetExpiration adds the expiration to the post ipam params
+func (o *PostIpamParams) SetExpiration(expiration *bool) {
+ o.Expiration = expiration
+}
+
+// WithFamily adds the family to the post ipam params
+func (o *PostIpamParams) WithFamily(family *string) *PostIpamParams {
+ o.SetFamily(family)
+ return o
+}
+
+// SetFamily adds the family to the post ipam params
+func (o *PostIpamParams) SetFamily(family *string) {
+ o.Family = family
+}
+
+// WithOwner adds the owner to the post ipam params
+func (o *PostIpamParams) WithOwner(owner *string) *PostIpamParams {
+ o.SetOwner(owner)
+ return o
+}
+
+// SetOwner adds the owner to the post ipam params
+func (o *PostIpamParams) SetOwner(owner *string) {
+ o.Owner = owner
+}
+
+// WithPool adds the pool to the post ipam params
+func (o *PostIpamParams) WithPool(pool *string) *PostIpamParams {
+ o.SetPool(pool)
+ return o
+}
+
+// SetPool adds the pool to the post ipam params
+func (o *PostIpamParams) SetPool(pool *string) {
+ o.Pool = pool
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *PostIpamParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if o.Expiration != nil {
+
+ // header param expiration
+ if err := r.SetHeaderParam("expiration", swag.FormatBool(*o.Expiration)); err != nil {
+ return err
+ }
+ }
+
+ if o.Family != nil {
+
+ // query param family
+ var qrFamily string
+
+ if o.Family != nil {
+ qrFamily = *o.Family
+ }
+ qFamily := qrFamily
+ if qFamily != "" {
+
+ if err := r.SetQueryParam("family", qFamily); err != nil {
+ return err
+ }
+ }
+ }
+
+ if o.Owner != nil {
+
+ // query param owner
+ var qrOwner string
+
+ if o.Owner != nil {
+ qrOwner = *o.Owner
+ }
+ qOwner := qrOwner
+ if qOwner != "" {
+
+ if err := r.SetQueryParam("owner", qOwner); err != nil {
+ return err
+ }
+ }
+ }
+
+ if o.Pool != nil {
+
+ // query param pool
+ var qrPool string
+
+ if o.Pool != nil {
+ qrPool = *o.Pool
+ }
+ qPool := qrPool
+ if qPool != "" {
+
+ if err := r.SetQueryParam("pool", qPool); err != nil {
+ return err
+ }
+ }
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/ipam/post_ipam_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/ipam/post_ipam_responses.go
new file mode 100644
index 000000000..2f13cc294
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/ipam/post_ipam_responses.go
@@ -0,0 +1,225 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package ipam
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// PostIpamReader is a Reader for the PostIpam structure.
+type PostIpamReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *PostIpamReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 201:
+ result := NewPostIpamCreated()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 403:
+ result := NewPostIpamForbidden()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 502:
+ result := NewPostIpamFailure()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewPostIpamCreated creates a PostIpamCreated with default headers values
+func NewPostIpamCreated() *PostIpamCreated {
+ return &PostIpamCreated{}
+}
+
+/*
+PostIpamCreated describes a response with status code 201, with default header values.
+
+Success
+*/
+type PostIpamCreated struct {
+ Payload *models.IPAMResponse
+}
+
+// IsSuccess returns true when this post ipam created response has a 2xx status code
+func (o *PostIpamCreated) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this post ipam created response has a 3xx status code
+func (o *PostIpamCreated) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this post ipam created response has a 4xx status code
+func (o *PostIpamCreated) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this post ipam created response has a 5xx status code
+func (o *PostIpamCreated) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this post ipam created response a status code equal to that given
+func (o *PostIpamCreated) IsCode(code int) bool {
+ return code == 201
+}
+
+func (o *PostIpamCreated) Error() string {
+ return fmt.Sprintf("[POST /ipam][%d] postIpamCreated %+v", 201, o.Payload)
+}
+
+func (o *PostIpamCreated) String() string {
+ return fmt.Sprintf("[POST /ipam][%d] postIpamCreated %+v", 201, o.Payload)
+}
+
+func (o *PostIpamCreated) GetPayload() *models.IPAMResponse {
+ return o.Payload
+}
+
+func (o *PostIpamCreated) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.IPAMResponse)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewPostIpamForbidden creates a PostIpamForbidden with default headers values
+func NewPostIpamForbidden() *PostIpamForbidden {
+ return &PostIpamForbidden{}
+}
+
+/*
+PostIpamForbidden describes a response with status code 403, with default header values.
+
+Forbidden
+*/
+type PostIpamForbidden struct {
+}
+
+// IsSuccess returns true when this post ipam forbidden response has a 2xx status code
+func (o *PostIpamForbidden) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this post ipam forbidden response has a 3xx status code
+func (o *PostIpamForbidden) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this post ipam forbidden response has a 4xx status code
+func (o *PostIpamForbidden) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this post ipam forbidden response has a 5xx status code
+func (o *PostIpamForbidden) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this post ipam forbidden response a status code equal to that given
+func (o *PostIpamForbidden) IsCode(code int) bool {
+ return code == 403
+}
+
+func (o *PostIpamForbidden) Error() string {
+ return fmt.Sprintf("[POST /ipam][%d] postIpamForbidden ", 403)
+}
+
+func (o *PostIpamForbidden) String() string {
+ return fmt.Sprintf("[POST /ipam][%d] postIpamForbidden ", 403)
+}
+
+func (o *PostIpamForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewPostIpamFailure creates a PostIpamFailure with default headers values
+func NewPostIpamFailure() *PostIpamFailure {
+ return &PostIpamFailure{}
+}
+
+/*
+PostIpamFailure describes a response with status code 502, with default header values.
+
+Allocation failure
+*/
+type PostIpamFailure struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this post ipam failure response has a 2xx status code
+func (o *PostIpamFailure) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this post ipam failure response has a 3xx status code
+func (o *PostIpamFailure) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this post ipam failure response has a 4xx status code
+func (o *PostIpamFailure) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this post ipam failure response has a 5xx status code
+func (o *PostIpamFailure) IsServerError() bool {
+ return true
+}
+
+// IsCode returns true when this post ipam failure response a status code equal to that given
+func (o *PostIpamFailure) IsCode(code int) bool {
+ return code == 502
+}
+
+func (o *PostIpamFailure) Error() string {
+ return fmt.Sprintf("[POST /ipam][%d] postIpamFailure %+v", 502, o.Payload)
+}
+
+func (o *PostIpamFailure) String() string {
+ return fmt.Sprintf("[POST /ipam][%d] postIpamFailure %+v", 502, o.Payload)
+}
+
+func (o *PostIpamFailure) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *PostIpamFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/metrics/get_metrics_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/metrics/get_metrics_parameters.go
new file mode 100644
index 000000000..7dd482af5
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/metrics/get_metrics_parameters.go
@@ -0,0 +1,131 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package metrics
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetMetricsParams creates a new GetMetricsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetMetricsParams() *GetMetricsParams {
+ return &GetMetricsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetMetricsParamsWithTimeout creates a new GetMetricsParams object
+// with the ability to set a timeout on a request.
+func NewGetMetricsParamsWithTimeout(timeout time.Duration) *GetMetricsParams {
+ return &GetMetricsParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetMetricsParamsWithContext creates a new GetMetricsParams object
+// with the ability to set a context for a request.
+func NewGetMetricsParamsWithContext(ctx context.Context) *GetMetricsParams {
+ return &GetMetricsParams{
+ Context: ctx,
+ }
+}
+
+// NewGetMetricsParamsWithHTTPClient creates a new GetMetricsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetMetricsParamsWithHTTPClient(client *http.Client) *GetMetricsParams {
+ return &GetMetricsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetMetricsParams contains all the parameters to send to the API endpoint
+
+ for the get metrics operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetMetricsParams struct {
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get metrics params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetMetricsParams) WithDefaults() *GetMetricsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get metrics params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetMetricsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get metrics params
+func (o *GetMetricsParams) WithTimeout(timeout time.Duration) *GetMetricsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get metrics params
+func (o *GetMetricsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get metrics params
+func (o *GetMetricsParams) WithContext(ctx context.Context) *GetMetricsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get metrics params
+func (o *GetMetricsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get metrics params
+func (o *GetMetricsParams) WithHTTPClient(client *http.Client) *GetMetricsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get metrics params
+func (o *GetMetricsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetMetricsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/metrics/get_metrics_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/metrics/get_metrics_responses.go
new file mode 100644
index 000000000..766782c92
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/metrics/get_metrics_responses.go
@@ -0,0 +1,156 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package metrics
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// GetMetricsReader is a Reader for the GetMetrics structure.
+type GetMetricsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetMetricsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetMetricsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 500:
+ result := NewGetMetricsInternalServerError()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetMetricsOK creates a GetMetricsOK with default headers values
+func NewGetMetricsOK() *GetMetricsOK {
+ return &GetMetricsOK{}
+}
+
+/*
+GetMetricsOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetMetricsOK struct {
+ Payload []*models.Metric
+}
+
+// IsSuccess returns true when this get metrics o k response has a 2xx status code
+func (o *GetMetricsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get metrics o k response has a 3xx status code
+func (o *GetMetricsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get metrics o k response has a 4xx status code
+func (o *GetMetricsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get metrics o k response has a 5xx status code
+func (o *GetMetricsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get metrics o k response a status code equal to that given
+func (o *GetMetricsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetMetricsOK) Error() string {
+ return fmt.Sprintf("[GET /metrics/][%d] getMetricsOK %+v", 200, o.Payload)
+}
+
+func (o *GetMetricsOK) String() string {
+ return fmt.Sprintf("[GET /metrics/][%d] getMetricsOK %+v", 200, o.Payload)
+}
+
+func (o *GetMetricsOK) GetPayload() []*models.Metric {
+ return o.Payload
+}
+
+func (o *GetMetricsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetMetricsInternalServerError creates a GetMetricsInternalServerError with default headers values
+func NewGetMetricsInternalServerError() *GetMetricsInternalServerError {
+ return &GetMetricsInternalServerError{}
+}
+
+/*
+GetMetricsInternalServerError describes a response with status code 500, with default header values.
+
+Metrics cannot be retrieved
+*/
+type GetMetricsInternalServerError struct {
+}
+
+// IsSuccess returns true when this get metrics internal server error response has a 2xx status code
+func (o *GetMetricsInternalServerError) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get metrics internal server error response has a 3xx status code
+func (o *GetMetricsInternalServerError) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get metrics internal server error response has a 4xx status code
+func (o *GetMetricsInternalServerError) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get metrics internal server error response has a 5xx status code
+func (o *GetMetricsInternalServerError) IsServerError() bool {
+ return true
+}
+
+// IsCode returns true when this get metrics internal server error response a status code equal to that given
+func (o *GetMetricsInternalServerError) IsCode(code int) bool {
+ return code == 500
+}
+
+func (o *GetMetricsInternalServerError) Error() string {
+ return fmt.Sprintf("[GET /metrics/][%d] getMetricsInternalServerError ", 500)
+}
+
+func (o *GetMetricsInternalServerError) String() string {
+ return fmt.Sprintf("[GET /metrics/][%d] getMetricsInternalServerError ", 500)
+}
+
+func (o *GetMetricsInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/metrics/metrics_client.go b/vendor/github.com/cilium/cilium/api/v1/client/metrics/metrics_client.go
new file mode 100644
index 000000000..d51858ef4
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/metrics/metrics_client.go
@@ -0,0 +1,82 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package metrics
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+)
+
+// New creates a new metrics API client.
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
+ return &Client{transport: transport, formats: formats}
+}
+
+/*
+Client for metrics API
+*/
+type Client struct {
+ transport runtime.ClientTransport
+ formats strfmt.Registry
+}
+
+// ClientOption is the option for Client methods
+type ClientOption func(*runtime.ClientOperation)
+
+// ClientService is the interface for Client methods
+type ClientService interface {
+ GetMetrics(params *GetMetricsParams, opts ...ClientOption) (*GetMetricsOK, error)
+
+ SetTransport(transport runtime.ClientTransport)
+}
+
+/*
+GetMetrics retrieves cilium metrics
+*/
+func (a *Client) GetMetrics(params *GetMetricsParams, opts ...ClientOption) (*GetMetricsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetMetricsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetMetrics",
+ Method: "GET",
+ PathPattern: "/metrics/",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetMetricsReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetMetricsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetMetrics: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+ a.transport = transport
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/delete_fqdn_cache_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/delete_fqdn_cache_parameters.go
new file mode 100644
index 000000000..c53a2f7cf
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/delete_fqdn_cache_parameters.go
@@ -0,0 +1,166 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package policy
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewDeleteFqdnCacheParams creates a new DeleteFqdnCacheParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewDeleteFqdnCacheParams() *DeleteFqdnCacheParams {
+ return &DeleteFqdnCacheParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewDeleteFqdnCacheParamsWithTimeout creates a new DeleteFqdnCacheParams object
+// with the ability to set a timeout on a request.
+func NewDeleteFqdnCacheParamsWithTimeout(timeout time.Duration) *DeleteFqdnCacheParams {
+ return &DeleteFqdnCacheParams{
+ timeout: timeout,
+ }
+}
+
+// NewDeleteFqdnCacheParamsWithContext creates a new DeleteFqdnCacheParams object
+// with the ability to set a context for a request.
+func NewDeleteFqdnCacheParamsWithContext(ctx context.Context) *DeleteFqdnCacheParams {
+ return &DeleteFqdnCacheParams{
+ Context: ctx,
+ }
+}
+
+// NewDeleteFqdnCacheParamsWithHTTPClient creates a new DeleteFqdnCacheParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewDeleteFqdnCacheParamsWithHTTPClient(client *http.Client) *DeleteFqdnCacheParams {
+ return &DeleteFqdnCacheParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+DeleteFqdnCacheParams contains all the parameters to send to the API endpoint
+
+ for the delete fqdn cache operation.
+
+ Typically these are written to a http.Request.
+*/
+type DeleteFqdnCacheParams struct {
+
+ /* Matchpattern.
+
+ A toFQDNs compatible matchPattern expression
+ */
+ Matchpattern *string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the delete fqdn cache params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteFqdnCacheParams) WithDefaults() *DeleteFqdnCacheParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the delete fqdn cache params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteFqdnCacheParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the delete fqdn cache params
+func (o *DeleteFqdnCacheParams) WithTimeout(timeout time.Duration) *DeleteFqdnCacheParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the delete fqdn cache params
+func (o *DeleteFqdnCacheParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the delete fqdn cache params
+func (o *DeleteFqdnCacheParams) WithContext(ctx context.Context) *DeleteFqdnCacheParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the delete fqdn cache params
+func (o *DeleteFqdnCacheParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the delete fqdn cache params
+func (o *DeleteFqdnCacheParams) WithHTTPClient(client *http.Client) *DeleteFqdnCacheParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the delete fqdn cache params
+func (o *DeleteFqdnCacheParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithMatchpattern adds the matchpattern to the delete fqdn cache params
+func (o *DeleteFqdnCacheParams) WithMatchpattern(matchpattern *string) *DeleteFqdnCacheParams {
+ o.SetMatchpattern(matchpattern)
+ return o
+}
+
+// SetMatchpattern adds the matchpattern to the delete fqdn cache params
+func (o *DeleteFqdnCacheParams) SetMatchpattern(matchpattern *string) {
+ o.Matchpattern = matchpattern
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *DeleteFqdnCacheParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if o.Matchpattern != nil {
+
+ // query param matchpattern
+ var qrMatchpattern string
+
+ if o.Matchpattern != nil {
+ qrMatchpattern = *o.Matchpattern
+ }
+ qMatchpattern := qrMatchpattern
+ if qMatchpattern != "" {
+
+ if err := r.SetQueryParam("matchpattern", qMatchpattern); err != nil {
+ return err
+ }
+ }
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/delete_fqdn_cache_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/delete_fqdn_cache_responses.go
new file mode 100644
index 000000000..10def651e
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/delete_fqdn_cache_responses.go
@@ -0,0 +1,213 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package policy
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// DeleteFqdnCacheReader is a Reader for the DeleteFqdnCache structure.
+type DeleteFqdnCacheReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *DeleteFqdnCacheReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewDeleteFqdnCacheOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewDeleteFqdnCacheBadRequest()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 403:
+ result := NewDeleteFqdnCacheForbidden()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewDeleteFqdnCacheOK creates a DeleteFqdnCacheOK with default headers values
+func NewDeleteFqdnCacheOK() *DeleteFqdnCacheOK {
+ return &DeleteFqdnCacheOK{}
+}
+
+/*
+DeleteFqdnCacheOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type DeleteFqdnCacheOK struct {
+}
+
+// IsSuccess returns true when this delete fqdn cache o k response has a 2xx status code
+func (o *DeleteFqdnCacheOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this delete fqdn cache o k response has a 3xx status code
+func (o *DeleteFqdnCacheOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete fqdn cache o k response has a 4xx status code
+func (o *DeleteFqdnCacheOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this delete fqdn cache o k response has a 5xx status code
+func (o *DeleteFqdnCacheOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this delete fqdn cache o k response a status code equal to that given
+func (o *DeleteFqdnCacheOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *DeleteFqdnCacheOK) Error() string {
+ return fmt.Sprintf("[DELETE /fqdn/cache][%d] deleteFqdnCacheOK ", 200)
+}
+
+func (o *DeleteFqdnCacheOK) String() string {
+ return fmt.Sprintf("[DELETE /fqdn/cache][%d] deleteFqdnCacheOK ", 200)
+}
+
+func (o *DeleteFqdnCacheOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewDeleteFqdnCacheBadRequest creates a DeleteFqdnCacheBadRequest with default headers values
+func NewDeleteFqdnCacheBadRequest() *DeleteFqdnCacheBadRequest {
+ return &DeleteFqdnCacheBadRequest{}
+}
+
+/*
+DeleteFqdnCacheBadRequest describes a response with status code 400, with default header values.
+
+Invalid request (error parsing parameters)
+*/
+type DeleteFqdnCacheBadRequest struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this delete fqdn cache bad request response has a 2xx status code
+func (o *DeleteFqdnCacheBadRequest) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this delete fqdn cache bad request response has a 3xx status code
+func (o *DeleteFqdnCacheBadRequest) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete fqdn cache bad request response has a 4xx status code
+func (o *DeleteFqdnCacheBadRequest) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this delete fqdn cache bad request response has a 5xx status code
+func (o *DeleteFqdnCacheBadRequest) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this delete fqdn cache bad request response a status code equal to that given
+func (o *DeleteFqdnCacheBadRequest) IsCode(code int) bool {
+ return code == 400
+}
+
+func (o *DeleteFqdnCacheBadRequest) Error() string {
+ return fmt.Sprintf("[DELETE /fqdn/cache][%d] deleteFqdnCacheBadRequest %+v", 400, o.Payload)
+}
+
+func (o *DeleteFqdnCacheBadRequest) String() string {
+ return fmt.Sprintf("[DELETE /fqdn/cache][%d] deleteFqdnCacheBadRequest %+v", 400, o.Payload)
+}
+
+func (o *DeleteFqdnCacheBadRequest) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *DeleteFqdnCacheBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewDeleteFqdnCacheForbidden creates a DeleteFqdnCacheForbidden with default headers values
+func NewDeleteFqdnCacheForbidden() *DeleteFqdnCacheForbidden {
+ return &DeleteFqdnCacheForbidden{}
+}
+
+/*
+DeleteFqdnCacheForbidden describes a response with status code 403, with default header values.
+
+Forbidden
+*/
+type DeleteFqdnCacheForbidden struct {
+}
+
+// IsSuccess returns true when this delete fqdn cache forbidden response has a 2xx status code
+func (o *DeleteFqdnCacheForbidden) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this delete fqdn cache forbidden response has a 3xx status code
+func (o *DeleteFqdnCacheForbidden) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete fqdn cache forbidden response has a 4xx status code
+func (o *DeleteFqdnCacheForbidden) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this delete fqdn cache forbidden response has a 5xx status code
+func (o *DeleteFqdnCacheForbidden) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this delete fqdn cache forbidden response a status code equal to that given
+func (o *DeleteFqdnCacheForbidden) IsCode(code int) bool {
+ return code == 403
+}
+
+func (o *DeleteFqdnCacheForbidden) Error() string {
+ return fmt.Sprintf("[DELETE /fqdn/cache][%d] deleteFqdnCacheForbidden ", 403)
+}
+
+func (o *DeleteFqdnCacheForbidden) String() string {
+ return fmt.Sprintf("[DELETE /fqdn/cache][%d] deleteFqdnCacheForbidden ", 403)
+}
+
+func (o *DeleteFqdnCacheForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/delete_policy_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/delete_policy_parameters.go
new file mode 100644
index 000000000..437ef67af
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/delete_policy_parameters.go
@@ -0,0 +1,153 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package policy
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// NewDeletePolicyParams creates a new DeletePolicyParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewDeletePolicyParams() *DeletePolicyParams {
+ return &DeletePolicyParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewDeletePolicyParamsWithTimeout creates a new DeletePolicyParams object
+// with the ability to set a timeout on a request.
+func NewDeletePolicyParamsWithTimeout(timeout time.Duration) *DeletePolicyParams {
+ return &DeletePolicyParams{
+ timeout: timeout,
+ }
+}
+
+// NewDeletePolicyParamsWithContext creates a new DeletePolicyParams object
+// with the ability to set a context for a request.
+func NewDeletePolicyParamsWithContext(ctx context.Context) *DeletePolicyParams {
+ return &DeletePolicyParams{
+ Context: ctx,
+ }
+}
+
+// NewDeletePolicyParamsWithHTTPClient creates a new DeletePolicyParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewDeletePolicyParamsWithHTTPClient(client *http.Client) *DeletePolicyParams {
+ return &DeletePolicyParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+DeletePolicyParams contains all the parameters to send to the API endpoint
+
+ for the delete policy operation.
+
+ Typically these are written to a http.Request.
+*/
+type DeletePolicyParams struct {
+
+ // Labels.
+ Labels models.Labels
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the delete policy params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeletePolicyParams) WithDefaults() *DeletePolicyParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the delete policy params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeletePolicyParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the delete policy params
+func (o *DeletePolicyParams) WithTimeout(timeout time.Duration) *DeletePolicyParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the delete policy params
+func (o *DeletePolicyParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the delete policy params
+func (o *DeletePolicyParams) WithContext(ctx context.Context) *DeletePolicyParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the delete policy params
+func (o *DeletePolicyParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the delete policy params
+func (o *DeletePolicyParams) WithHTTPClient(client *http.Client) *DeletePolicyParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the delete policy params
+func (o *DeletePolicyParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithLabels adds the labels to the delete policy params
+func (o *DeletePolicyParams) WithLabels(labels models.Labels) *DeletePolicyParams {
+ o.SetLabels(labels)
+ return o
+}
+
+// SetLabels adds the labels to the delete policy params
+func (o *DeletePolicyParams) SetLabels(labels models.Labels) {
+ o.Labels = labels
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *DeletePolicyParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if o.Labels != nil {
+ if err := r.SetBodyParam(o.Labels); err != nil {
+ return err
+ }
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/delete_policy_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/delete_policy_responses.go
new file mode 100644
index 000000000..538a831e3
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/delete_policy_responses.go
@@ -0,0 +1,349 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package policy
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// DeletePolicyReader is a Reader for the DeletePolicy structure.
+type DeletePolicyReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *DeletePolicyReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewDeletePolicyOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewDeletePolicyInvalid()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 403:
+ result := NewDeletePolicyForbidden()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 404:
+ result := NewDeletePolicyNotFound()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 500:
+ result := NewDeletePolicyFailure()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewDeletePolicyOK creates a DeletePolicyOK with default headers values
+func NewDeletePolicyOK() *DeletePolicyOK {
+ return &DeletePolicyOK{}
+}
+
+/*
+DeletePolicyOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type DeletePolicyOK struct {
+ Payload *models.Policy
+}
+
+// IsSuccess returns true when this delete policy o k response has a 2xx status code
+func (o *DeletePolicyOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this delete policy o k response has a 3xx status code
+func (o *DeletePolicyOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete policy o k response has a 4xx status code
+func (o *DeletePolicyOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this delete policy o k response has a 5xx status code
+func (o *DeletePolicyOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this delete policy o k response a status code equal to that given
+func (o *DeletePolicyOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *DeletePolicyOK) Error() string {
+ return fmt.Sprintf("[DELETE /policy][%d] deletePolicyOK %+v", 200, o.Payload)
+}
+
+func (o *DeletePolicyOK) String() string {
+ return fmt.Sprintf("[DELETE /policy][%d] deletePolicyOK %+v", 200, o.Payload)
+}
+
+func (o *DeletePolicyOK) GetPayload() *models.Policy {
+ return o.Payload
+}
+
+func (o *DeletePolicyOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.Policy)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewDeletePolicyInvalid creates a DeletePolicyInvalid with default headers values
+func NewDeletePolicyInvalid() *DeletePolicyInvalid {
+ return &DeletePolicyInvalid{}
+}
+
+/*
+DeletePolicyInvalid describes a response with status code 400, with default header values.
+
+Invalid request
+*/
+type DeletePolicyInvalid struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this delete policy invalid response has a 2xx status code
+func (o *DeletePolicyInvalid) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this delete policy invalid response has a 3xx status code
+func (o *DeletePolicyInvalid) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete policy invalid response has a 4xx status code
+func (o *DeletePolicyInvalid) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this delete policy invalid response has a 5xx status code
+func (o *DeletePolicyInvalid) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this delete policy invalid response a status code equal to that given
+func (o *DeletePolicyInvalid) IsCode(code int) bool {
+ return code == 400
+}
+
+func (o *DeletePolicyInvalid) Error() string {
+ return fmt.Sprintf("[DELETE /policy][%d] deletePolicyInvalid %+v", 400, o.Payload)
+}
+
+func (o *DeletePolicyInvalid) String() string {
+ return fmt.Sprintf("[DELETE /policy][%d] deletePolicyInvalid %+v", 400, o.Payload)
+}
+
+func (o *DeletePolicyInvalid) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *DeletePolicyInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewDeletePolicyForbidden creates a DeletePolicyForbidden with default headers values
+func NewDeletePolicyForbidden() *DeletePolicyForbidden {
+ return &DeletePolicyForbidden{}
+}
+
+/*
+DeletePolicyForbidden describes a response with status code 403, with default header values.
+
+Forbidden
+*/
+type DeletePolicyForbidden struct {
+}
+
+// IsSuccess returns true when this delete policy forbidden response has a 2xx status code
+func (o *DeletePolicyForbidden) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this delete policy forbidden response has a 3xx status code
+func (o *DeletePolicyForbidden) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete policy forbidden response has a 4xx status code
+func (o *DeletePolicyForbidden) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this delete policy forbidden response has a 5xx status code
+func (o *DeletePolicyForbidden) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this delete policy forbidden response a status code equal to that given
+func (o *DeletePolicyForbidden) IsCode(code int) bool {
+ return code == 403
+}
+
+func (o *DeletePolicyForbidden) Error() string {
+ return fmt.Sprintf("[DELETE /policy][%d] deletePolicyForbidden ", 403)
+}
+
+func (o *DeletePolicyForbidden) String() string {
+ return fmt.Sprintf("[DELETE /policy][%d] deletePolicyForbidden ", 403)
+}
+
+func (o *DeletePolicyForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewDeletePolicyNotFound creates a DeletePolicyNotFound with default headers values
+func NewDeletePolicyNotFound() *DeletePolicyNotFound {
+ return &DeletePolicyNotFound{}
+}
+
+/*
+DeletePolicyNotFound describes a response with status code 404, with default header values.
+
+Policy not found
+*/
+type DeletePolicyNotFound struct {
+}
+
+// IsSuccess returns true when this delete policy not found response has a 2xx status code
+func (o *DeletePolicyNotFound) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this delete policy not found response has a 3xx status code
+func (o *DeletePolicyNotFound) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete policy not found response has a 4xx status code
+func (o *DeletePolicyNotFound) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this delete policy not found response has a 5xx status code
+func (o *DeletePolicyNotFound) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this delete policy not found response a status code equal to that given
+func (o *DeletePolicyNotFound) IsCode(code int) bool {
+ return code == 404
+}
+
+func (o *DeletePolicyNotFound) Error() string {
+ return fmt.Sprintf("[DELETE /policy][%d] deletePolicyNotFound ", 404)
+}
+
+func (o *DeletePolicyNotFound) String() string {
+ return fmt.Sprintf("[DELETE /policy][%d] deletePolicyNotFound ", 404)
+}
+
+func (o *DeletePolicyNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewDeletePolicyFailure creates a DeletePolicyFailure with default headers values
+func NewDeletePolicyFailure() *DeletePolicyFailure {
+ return &DeletePolicyFailure{}
+}
+
+/*
+DeletePolicyFailure describes a response with status code 500, with default header values.
+
+Error while deleting policy
+*/
+type DeletePolicyFailure struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this delete policy failure response has a 2xx status code
+func (o *DeletePolicyFailure) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this delete policy failure response has a 3xx status code
+func (o *DeletePolicyFailure) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete policy failure response has a 4xx status code
+func (o *DeletePolicyFailure) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this delete policy failure response has a 5xx status code
+func (o *DeletePolicyFailure) IsServerError() bool {
+ return true
+}
+
+// IsCode returns true when this delete policy failure response a status code equal to that given
+func (o *DeletePolicyFailure) IsCode(code int) bool {
+ return code == 500
+}
+
+func (o *DeletePolicyFailure) Error() string {
+ return fmt.Sprintf("[DELETE /policy][%d] deletePolicyFailure %+v", 500, o.Payload)
+}
+
+func (o *DeletePolicyFailure) String() string {
+ return fmt.Sprintf("[DELETE /policy][%d] deletePolicyFailure %+v", 500, o.Payload)
+}
+
+func (o *DeletePolicyFailure) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *DeletePolicyFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_id_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_id_parameters.go
new file mode 100644
index 000000000..e4da10fca
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_id_parameters.go
@@ -0,0 +1,270 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package policy
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetFqdnCacheIDParams creates a new GetFqdnCacheIDParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetFqdnCacheIDParams() *GetFqdnCacheIDParams {
+ return &GetFqdnCacheIDParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetFqdnCacheIDParamsWithTimeout creates a new GetFqdnCacheIDParams object
+// with the ability to set a timeout on a request.
+func NewGetFqdnCacheIDParamsWithTimeout(timeout time.Duration) *GetFqdnCacheIDParams {
+ return &GetFqdnCacheIDParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetFqdnCacheIDParamsWithContext creates a new GetFqdnCacheIDParams object
+// with the ability to set a context for a request.
+func NewGetFqdnCacheIDParamsWithContext(ctx context.Context) *GetFqdnCacheIDParams {
+ return &GetFqdnCacheIDParams{
+ Context: ctx,
+ }
+}
+
+// NewGetFqdnCacheIDParamsWithHTTPClient creates a new GetFqdnCacheIDParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetFqdnCacheIDParamsWithHTTPClient(client *http.Client) *GetFqdnCacheIDParams {
+ return &GetFqdnCacheIDParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetFqdnCacheIDParams contains all the parameters to send to the API endpoint
+
+ for the get fqdn cache ID operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetFqdnCacheIDParams struct {
+
+ /* Cidr.
+
+ A CIDR range of IPs
+ */
+ Cidr *string
+
+ /* ID.
+
+ String describing an endpoint with the format ``[prefix:]id``. If no prefix
+ is specified, a prefix of ``cilium-local:`` is assumed. Not all endpoints
+ will be addressable by all endpoint ID prefixes with the exception of the
+ local Cilium UUID which is assigned to all endpoints.
+
+ Supported endpoint id prefixes:
+ - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595
+ - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343
+ - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0
+ - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique)
+ - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique)
+ - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique)
+ - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1
+ - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444
+
+ */
+ ID string
+
+ /* Matchpattern.
+
+ A toFQDNs compatible matchPattern expression
+ */
+ Matchpattern *string
+
+ /* Source.
+
+ Source from which FQDN entries come from
+ */
+ Source *string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get fqdn cache ID params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetFqdnCacheIDParams) WithDefaults() *GetFqdnCacheIDParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get fqdn cache ID params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetFqdnCacheIDParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get fqdn cache ID params
+func (o *GetFqdnCacheIDParams) WithTimeout(timeout time.Duration) *GetFqdnCacheIDParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get fqdn cache ID params
+func (o *GetFqdnCacheIDParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get fqdn cache ID params
+func (o *GetFqdnCacheIDParams) WithContext(ctx context.Context) *GetFqdnCacheIDParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get fqdn cache ID params
+func (o *GetFqdnCacheIDParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get fqdn cache ID params
+func (o *GetFqdnCacheIDParams) WithHTTPClient(client *http.Client) *GetFqdnCacheIDParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get fqdn cache ID params
+func (o *GetFqdnCacheIDParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithCidr adds the cidr to the get fqdn cache ID params
+func (o *GetFqdnCacheIDParams) WithCidr(cidr *string) *GetFqdnCacheIDParams {
+ o.SetCidr(cidr)
+ return o
+}
+
+// SetCidr adds the cidr to the get fqdn cache ID params
+func (o *GetFqdnCacheIDParams) SetCidr(cidr *string) {
+ o.Cidr = cidr
+}
+
+// WithID adds the id to the get fqdn cache ID params
+func (o *GetFqdnCacheIDParams) WithID(id string) *GetFqdnCacheIDParams {
+ o.SetID(id)
+ return o
+}
+
+// SetID adds the id to the get fqdn cache ID params
+func (o *GetFqdnCacheIDParams) SetID(id string) {
+ o.ID = id
+}
+
+// WithMatchpattern adds the matchpattern to the get fqdn cache ID params
+func (o *GetFqdnCacheIDParams) WithMatchpattern(matchpattern *string) *GetFqdnCacheIDParams {
+ o.SetMatchpattern(matchpattern)
+ return o
+}
+
+// SetMatchpattern adds the matchpattern to the get fqdn cache ID params
+func (o *GetFqdnCacheIDParams) SetMatchpattern(matchpattern *string) {
+ o.Matchpattern = matchpattern
+}
+
+// WithSource adds the source to the get fqdn cache ID params
+func (o *GetFqdnCacheIDParams) WithSource(source *string) *GetFqdnCacheIDParams {
+ o.SetSource(source)
+ return o
+}
+
+// SetSource adds the source to the get fqdn cache ID params
+func (o *GetFqdnCacheIDParams) SetSource(source *string) {
+ o.Source = source
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetFqdnCacheIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if o.Cidr != nil {
+
+ // query param cidr
+ var qrCidr string
+
+ if o.Cidr != nil {
+ qrCidr = *o.Cidr
+ }
+ qCidr := qrCidr
+ if qCidr != "" {
+
+ if err := r.SetQueryParam("cidr", qCidr); err != nil {
+ return err
+ }
+ }
+ }
+
+ // path param id
+ if err := r.SetPathParam("id", o.ID); err != nil {
+ return err
+ }
+
+ if o.Matchpattern != nil {
+
+ // query param matchpattern
+ var qrMatchpattern string
+
+ if o.Matchpattern != nil {
+ qrMatchpattern = *o.Matchpattern
+ }
+ qMatchpattern := qrMatchpattern
+ if qMatchpattern != "" {
+
+ if err := r.SetQueryParam("matchpattern", qMatchpattern); err != nil {
+ return err
+ }
+ }
+ }
+
+ if o.Source != nil {
+
+ // query param source
+ var qrSource string
+
+ if o.Source != nil {
+ qrSource = *o.Source
+ }
+ qSource := qrSource
+ if qSource != "" {
+
+ if err := r.SetQueryParam("source", qSource); err != nil {
+ return err
+ }
+ }
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_id_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_id_responses.go
new file mode 100644
index 000000000..7d4297523
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_id_responses.go
@@ -0,0 +1,223 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package policy
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// GetFqdnCacheIDReader is a Reader for the GetFqdnCacheID structure.
+type GetFqdnCacheIDReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetFqdnCacheIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetFqdnCacheIDOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewGetFqdnCacheIDBadRequest()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 404:
+ result := NewGetFqdnCacheIDNotFound()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetFqdnCacheIDOK creates a GetFqdnCacheIDOK with default headers values
+func NewGetFqdnCacheIDOK() *GetFqdnCacheIDOK {
+ return &GetFqdnCacheIDOK{}
+}
+
+/*
+GetFqdnCacheIDOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetFqdnCacheIDOK struct {
+ Payload []*models.DNSLookup
+}
+
+// IsSuccess returns true when this get fqdn cache Id o k response has a 2xx status code
+func (o *GetFqdnCacheIDOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get fqdn cache Id o k response has a 3xx status code
+func (o *GetFqdnCacheIDOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get fqdn cache Id o k response has a 4xx status code
+func (o *GetFqdnCacheIDOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get fqdn cache Id o k response has a 5xx status code
+func (o *GetFqdnCacheIDOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get fqdn cache Id o k response a status code equal to that given
+func (o *GetFqdnCacheIDOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetFqdnCacheIDOK) Error() string {
+ return fmt.Sprintf("[GET /fqdn/cache/{id}][%d] getFqdnCacheIdOK %+v", 200, o.Payload)
+}
+
+func (o *GetFqdnCacheIDOK) String() string {
+ return fmt.Sprintf("[GET /fqdn/cache/{id}][%d] getFqdnCacheIdOK %+v", 200, o.Payload)
+}
+
+func (o *GetFqdnCacheIDOK) GetPayload() []*models.DNSLookup {
+ return o.Payload
+}
+
+func (o *GetFqdnCacheIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetFqdnCacheIDBadRequest creates a GetFqdnCacheIDBadRequest with default headers values
+func NewGetFqdnCacheIDBadRequest() *GetFqdnCacheIDBadRequest {
+ return &GetFqdnCacheIDBadRequest{}
+}
+
+/*
+GetFqdnCacheIDBadRequest describes a response with status code 400, with default header values.
+
+Invalid request (error parsing parameters)
+*/
+type GetFqdnCacheIDBadRequest struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this get fqdn cache Id bad request response has a 2xx status code
+func (o *GetFqdnCacheIDBadRequest) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get fqdn cache Id bad request response has a 3xx status code
+func (o *GetFqdnCacheIDBadRequest) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get fqdn cache Id bad request response has a 4xx status code
+func (o *GetFqdnCacheIDBadRequest) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get fqdn cache Id bad request response has a 5xx status code
+func (o *GetFqdnCacheIDBadRequest) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get fqdn cache Id bad request response a status code equal to that given
+func (o *GetFqdnCacheIDBadRequest) IsCode(code int) bool {
+ return code == 400
+}
+
+func (o *GetFqdnCacheIDBadRequest) Error() string {
+ return fmt.Sprintf("[GET /fqdn/cache/{id}][%d] getFqdnCacheIdBadRequest %+v", 400, o.Payload)
+}
+
+func (o *GetFqdnCacheIDBadRequest) String() string {
+ return fmt.Sprintf("[GET /fqdn/cache/{id}][%d] getFqdnCacheIdBadRequest %+v", 400, o.Payload)
+}
+
+func (o *GetFqdnCacheIDBadRequest) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *GetFqdnCacheIDBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetFqdnCacheIDNotFound creates a GetFqdnCacheIDNotFound with default headers values
+func NewGetFqdnCacheIDNotFound() *GetFqdnCacheIDNotFound {
+ return &GetFqdnCacheIDNotFound{}
+}
+
+/*
+GetFqdnCacheIDNotFound describes a response with status code 404, with default header values.
+
+No DNS data with provided parameters found
+*/
+type GetFqdnCacheIDNotFound struct {
+}
+
+// IsSuccess returns true when this get fqdn cache Id not found response has a 2xx status code
+func (o *GetFqdnCacheIDNotFound) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get fqdn cache Id not found response has a 3xx status code
+func (o *GetFqdnCacheIDNotFound) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get fqdn cache Id not found response has a 4xx status code
+func (o *GetFqdnCacheIDNotFound) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get fqdn cache Id not found response has a 5xx status code
+func (o *GetFqdnCacheIDNotFound) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get fqdn cache Id not found response a status code equal to that given
+func (o *GetFqdnCacheIDNotFound) IsCode(code int) bool {
+ return code == 404
+}
+
+func (o *GetFqdnCacheIDNotFound) Error() string {
+ return fmt.Sprintf("[GET /fqdn/cache/{id}][%d] getFqdnCacheIdNotFound ", 404)
+}
+
+func (o *GetFqdnCacheIDNotFound) String() string {
+ return fmt.Sprintf("[GET /fqdn/cache/{id}][%d] getFqdnCacheIdNotFound ", 404)
+}
+
+func (o *GetFqdnCacheIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_parameters.go
new file mode 100644
index 000000000..d6eb10e11
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_parameters.go
@@ -0,0 +1,234 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package policy
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetFqdnCacheParams creates a new GetFqdnCacheParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetFqdnCacheParams() *GetFqdnCacheParams {
+ return &GetFqdnCacheParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetFqdnCacheParamsWithTimeout creates a new GetFqdnCacheParams object
+// with the ability to set a timeout on a request.
+func NewGetFqdnCacheParamsWithTimeout(timeout time.Duration) *GetFqdnCacheParams {
+ return &GetFqdnCacheParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetFqdnCacheParamsWithContext creates a new GetFqdnCacheParams object
+// with the ability to set a context for a request.
+func NewGetFqdnCacheParamsWithContext(ctx context.Context) *GetFqdnCacheParams {
+ return &GetFqdnCacheParams{
+ Context: ctx,
+ }
+}
+
+// NewGetFqdnCacheParamsWithHTTPClient creates a new GetFqdnCacheParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetFqdnCacheParamsWithHTTPClient(client *http.Client) *GetFqdnCacheParams {
+ return &GetFqdnCacheParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetFqdnCacheParams contains all the parameters to send to the API endpoint
+
+ for the get fqdn cache operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetFqdnCacheParams struct {
+
+ /* Cidr.
+
+ A CIDR range of IPs
+ */
+ Cidr *string
+
+ /* Matchpattern.
+
+ A toFQDNs compatible matchPattern expression
+ */
+ Matchpattern *string
+
+ /* Source.
+
+ Source from which FQDN entries come from
+ */
+ Source *string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get fqdn cache params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetFqdnCacheParams) WithDefaults() *GetFqdnCacheParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get fqdn cache params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetFqdnCacheParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get fqdn cache params
+func (o *GetFqdnCacheParams) WithTimeout(timeout time.Duration) *GetFqdnCacheParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get fqdn cache params
+func (o *GetFqdnCacheParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get fqdn cache params
+func (o *GetFqdnCacheParams) WithContext(ctx context.Context) *GetFqdnCacheParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get fqdn cache params
+func (o *GetFqdnCacheParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get fqdn cache params
+func (o *GetFqdnCacheParams) WithHTTPClient(client *http.Client) *GetFqdnCacheParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get fqdn cache params
+func (o *GetFqdnCacheParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithCidr adds the cidr to the get fqdn cache params
+func (o *GetFqdnCacheParams) WithCidr(cidr *string) *GetFqdnCacheParams {
+ o.SetCidr(cidr)
+ return o
+}
+
+// SetCidr adds the cidr to the get fqdn cache params
+func (o *GetFqdnCacheParams) SetCidr(cidr *string) {
+ o.Cidr = cidr
+}
+
+// WithMatchpattern adds the matchpattern to the get fqdn cache params
+func (o *GetFqdnCacheParams) WithMatchpattern(matchpattern *string) *GetFqdnCacheParams {
+ o.SetMatchpattern(matchpattern)
+ return o
+}
+
+// SetMatchpattern adds the matchpattern to the get fqdn cache params
+func (o *GetFqdnCacheParams) SetMatchpattern(matchpattern *string) {
+ o.Matchpattern = matchpattern
+}
+
+// WithSource adds the source to the get fqdn cache params
+func (o *GetFqdnCacheParams) WithSource(source *string) *GetFqdnCacheParams {
+ o.SetSource(source)
+ return o
+}
+
+// SetSource adds the source to the get fqdn cache params
+func (o *GetFqdnCacheParams) SetSource(source *string) {
+ o.Source = source
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetFqdnCacheParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if o.Cidr != nil {
+
+ // query param cidr
+ var qrCidr string
+
+ if o.Cidr != nil {
+ qrCidr = *o.Cidr
+ }
+ qCidr := qrCidr
+ if qCidr != "" {
+
+ if err := r.SetQueryParam("cidr", qCidr); err != nil {
+ return err
+ }
+ }
+ }
+
+ if o.Matchpattern != nil {
+
+ // query param matchpattern
+ var qrMatchpattern string
+
+ if o.Matchpattern != nil {
+ qrMatchpattern = *o.Matchpattern
+ }
+ qMatchpattern := qrMatchpattern
+ if qMatchpattern != "" {
+
+ if err := r.SetQueryParam("matchpattern", qMatchpattern); err != nil {
+ return err
+ }
+ }
+ }
+
+ if o.Source != nil {
+
+ // query param source
+ var qrSource string
+
+ if o.Source != nil {
+ qrSource = *o.Source
+ }
+ qSource := qrSource
+ if qSource != "" {
+
+ if err := r.SetQueryParam("source", qSource); err != nil {
+ return err
+ }
+ }
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_responses.go
new file mode 100644
index 000000000..775a0fc20
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_responses.go
@@ -0,0 +1,223 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package policy
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// GetFqdnCacheReader is a Reader for the GetFqdnCache structure.
+type GetFqdnCacheReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetFqdnCacheReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetFqdnCacheOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewGetFqdnCacheBadRequest()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 404:
+ result := NewGetFqdnCacheNotFound()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetFqdnCacheOK creates a GetFqdnCacheOK with default headers values
+func NewGetFqdnCacheOK() *GetFqdnCacheOK {
+ return &GetFqdnCacheOK{}
+}
+
+/*
+GetFqdnCacheOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetFqdnCacheOK struct {
+ Payload []*models.DNSLookup
+}
+
+// IsSuccess returns true when this get fqdn cache o k response has a 2xx status code
+func (o *GetFqdnCacheOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get fqdn cache o k response has a 3xx status code
+func (o *GetFqdnCacheOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get fqdn cache o k response has a 4xx status code
+func (o *GetFqdnCacheOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get fqdn cache o k response has a 5xx status code
+func (o *GetFqdnCacheOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get fqdn cache o k response a status code equal to that given
+func (o *GetFqdnCacheOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetFqdnCacheOK) Error() string {
+ return fmt.Sprintf("[GET /fqdn/cache][%d] getFqdnCacheOK %+v", 200, o.Payload)
+}
+
+func (o *GetFqdnCacheOK) String() string {
+ return fmt.Sprintf("[GET /fqdn/cache][%d] getFqdnCacheOK %+v", 200, o.Payload)
+}
+
+func (o *GetFqdnCacheOK) GetPayload() []*models.DNSLookup {
+ return o.Payload
+}
+
+func (o *GetFqdnCacheOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetFqdnCacheBadRequest creates a GetFqdnCacheBadRequest with default headers values
+func NewGetFqdnCacheBadRequest() *GetFqdnCacheBadRequest {
+ return &GetFqdnCacheBadRequest{}
+}
+
+/*
+GetFqdnCacheBadRequest describes a response with status code 400, with default header values.
+
+Invalid request (error parsing parameters)
+*/
+type GetFqdnCacheBadRequest struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this get fqdn cache bad request response has a 2xx status code
+func (o *GetFqdnCacheBadRequest) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get fqdn cache bad request response has a 3xx status code
+func (o *GetFqdnCacheBadRequest) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get fqdn cache bad request response has a 4xx status code
+func (o *GetFqdnCacheBadRequest) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get fqdn cache bad request response has a 5xx status code
+func (o *GetFqdnCacheBadRequest) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get fqdn cache bad request response a status code equal to that given
+func (o *GetFqdnCacheBadRequest) IsCode(code int) bool {
+ return code == 400
+}
+
+func (o *GetFqdnCacheBadRequest) Error() string {
+ return fmt.Sprintf("[GET /fqdn/cache][%d] getFqdnCacheBadRequest %+v", 400, o.Payload)
+}
+
+func (o *GetFqdnCacheBadRequest) String() string {
+ return fmt.Sprintf("[GET /fqdn/cache][%d] getFqdnCacheBadRequest %+v", 400, o.Payload)
+}
+
+func (o *GetFqdnCacheBadRequest) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *GetFqdnCacheBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetFqdnCacheNotFound creates a GetFqdnCacheNotFound with default headers values
+func NewGetFqdnCacheNotFound() *GetFqdnCacheNotFound {
+ return &GetFqdnCacheNotFound{}
+}
+
+/*
+GetFqdnCacheNotFound describes a response with status code 404, with default header values.
+
+No DNS data with provided parameters found
+*/
+type GetFqdnCacheNotFound struct {
+}
+
+// IsSuccess returns true when this get fqdn cache not found response has a 2xx status code
+func (o *GetFqdnCacheNotFound) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get fqdn cache not found response has a 3xx status code
+func (o *GetFqdnCacheNotFound) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get fqdn cache not found response has a 4xx status code
+func (o *GetFqdnCacheNotFound) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get fqdn cache not found response has a 5xx status code
+func (o *GetFqdnCacheNotFound) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get fqdn cache not found response a status code equal to that given
+func (o *GetFqdnCacheNotFound) IsCode(code int) bool {
+ return code == 404
+}
+
+func (o *GetFqdnCacheNotFound) Error() string {
+ return fmt.Sprintf("[GET /fqdn/cache][%d] getFqdnCacheNotFound ", 404)
+}
+
+func (o *GetFqdnCacheNotFound) String() string {
+ return fmt.Sprintf("[GET /fqdn/cache][%d] getFqdnCacheNotFound ", 404)
+}
+
+func (o *GetFqdnCacheNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_names_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_names_parameters.go
new file mode 100644
index 000000000..1b6743281
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_names_parameters.go
@@ -0,0 +1,131 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package policy
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetFqdnNamesParams creates a new GetFqdnNamesParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetFqdnNamesParams() *GetFqdnNamesParams {
+ return &GetFqdnNamesParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetFqdnNamesParamsWithTimeout creates a new GetFqdnNamesParams object
+// with the ability to set a timeout on a request.
+func NewGetFqdnNamesParamsWithTimeout(timeout time.Duration) *GetFqdnNamesParams {
+ return &GetFqdnNamesParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetFqdnNamesParamsWithContext creates a new GetFqdnNamesParams object
+// with the ability to set a context for a request.
+func NewGetFqdnNamesParamsWithContext(ctx context.Context) *GetFqdnNamesParams {
+ return &GetFqdnNamesParams{
+ Context: ctx,
+ }
+}
+
+// NewGetFqdnNamesParamsWithHTTPClient creates a new GetFqdnNamesParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetFqdnNamesParamsWithHTTPClient(client *http.Client) *GetFqdnNamesParams {
+ return &GetFqdnNamesParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetFqdnNamesParams contains all the parameters to send to the API endpoint
+
+ for the get fqdn names operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetFqdnNamesParams struct {
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get fqdn names params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetFqdnNamesParams) WithDefaults() *GetFqdnNamesParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get fqdn names params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetFqdnNamesParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get fqdn names params
+func (o *GetFqdnNamesParams) WithTimeout(timeout time.Duration) *GetFqdnNamesParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get fqdn names params
+func (o *GetFqdnNamesParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get fqdn names params
+func (o *GetFqdnNamesParams) WithContext(ctx context.Context) *GetFqdnNamesParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get fqdn names params
+func (o *GetFqdnNamesParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get fqdn names params
+func (o *GetFqdnNamesParams) WithHTTPClient(client *http.Client) *GetFqdnNamesParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get fqdn names params
+func (o *GetFqdnNamesParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetFqdnNamesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_names_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_names_responses.go
new file mode 100644
index 000000000..b2b160db6
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_names_responses.go
@@ -0,0 +1,168 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package policy
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// GetFqdnNamesReader is a Reader for the GetFqdnNames structure.
+type GetFqdnNamesReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetFqdnNamesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetFqdnNamesOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewGetFqdnNamesBadRequest()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetFqdnNamesOK creates a GetFqdnNamesOK with default headers values
+func NewGetFqdnNamesOK() *GetFqdnNamesOK {
+ return &GetFqdnNamesOK{}
+}
+
+/*
+GetFqdnNamesOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetFqdnNamesOK struct {
+ Payload *models.NameManager
+}
+
+// IsSuccess returns true when this get fqdn names o k response has a 2xx status code
+func (o *GetFqdnNamesOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get fqdn names o k response has a 3xx status code
+func (o *GetFqdnNamesOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get fqdn names o k response has a 4xx status code
+func (o *GetFqdnNamesOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get fqdn names o k response has a 5xx status code
+func (o *GetFqdnNamesOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get fqdn names o k response a status code equal to that given
+func (o *GetFqdnNamesOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetFqdnNamesOK) Error() string {
+ return fmt.Sprintf("[GET /fqdn/names][%d] getFqdnNamesOK %+v", 200, o.Payload)
+}
+
+func (o *GetFqdnNamesOK) String() string {
+ return fmt.Sprintf("[GET /fqdn/names][%d] getFqdnNamesOK %+v", 200, o.Payload)
+}
+
+func (o *GetFqdnNamesOK) GetPayload() *models.NameManager {
+ return o.Payload
+}
+
+func (o *GetFqdnNamesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.NameManager)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetFqdnNamesBadRequest creates a GetFqdnNamesBadRequest with default headers values
+func NewGetFqdnNamesBadRequest() *GetFqdnNamesBadRequest {
+ return &GetFqdnNamesBadRequest{}
+}
+
+/*
+GetFqdnNamesBadRequest describes a response with status code 400, with default header values.
+
+Invalid request (error parsing parameters)
+*/
+type GetFqdnNamesBadRequest struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this get fqdn names bad request response has a 2xx status code
+func (o *GetFqdnNamesBadRequest) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get fqdn names bad request response has a 3xx status code
+func (o *GetFqdnNamesBadRequest) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get fqdn names bad request response has a 4xx status code
+func (o *GetFqdnNamesBadRequest) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get fqdn names bad request response has a 5xx status code
+func (o *GetFqdnNamesBadRequest) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get fqdn names bad request response a status code equal to that given
+func (o *GetFqdnNamesBadRequest) IsCode(code int) bool {
+ return code == 400
+}
+
+func (o *GetFqdnNamesBadRequest) Error() string {
+ return fmt.Sprintf("[GET /fqdn/names][%d] getFqdnNamesBadRequest %+v", 400, o.Payload)
+}
+
+func (o *GetFqdnNamesBadRequest) String() string {
+ return fmt.Sprintf("[GET /fqdn/names][%d] getFqdnNamesBadRequest %+v", 400, o.Payload)
+}
+
+func (o *GetFqdnNamesBadRequest) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *GetFqdnNamesBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_endpoints_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_endpoints_parameters.go
new file mode 100644
index 000000000..6a2182829
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_endpoints_parameters.go
@@ -0,0 +1,131 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package policy
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetIdentityEndpointsParams creates a new GetIdentityEndpointsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetIdentityEndpointsParams() *GetIdentityEndpointsParams {
+ return &GetIdentityEndpointsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetIdentityEndpointsParamsWithTimeout creates a new GetIdentityEndpointsParams object
+// with the ability to set a timeout on a request.
+func NewGetIdentityEndpointsParamsWithTimeout(timeout time.Duration) *GetIdentityEndpointsParams {
+ return &GetIdentityEndpointsParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetIdentityEndpointsParamsWithContext creates a new GetIdentityEndpointsParams object
+// with the ability to set a context for a request.
+func NewGetIdentityEndpointsParamsWithContext(ctx context.Context) *GetIdentityEndpointsParams {
+ return &GetIdentityEndpointsParams{
+ Context: ctx,
+ }
+}
+
+// NewGetIdentityEndpointsParamsWithHTTPClient creates a new GetIdentityEndpointsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetIdentityEndpointsParamsWithHTTPClient(client *http.Client) *GetIdentityEndpointsParams {
+ return &GetIdentityEndpointsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetIdentityEndpointsParams contains all the parameters to send to the API endpoint
+
+ for the get identity endpoints operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetIdentityEndpointsParams struct {
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get identity endpoints params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetIdentityEndpointsParams) WithDefaults() *GetIdentityEndpointsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get identity endpoints params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetIdentityEndpointsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get identity endpoints params
+func (o *GetIdentityEndpointsParams) WithTimeout(timeout time.Duration) *GetIdentityEndpointsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get identity endpoints params
+func (o *GetIdentityEndpointsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get identity endpoints params
+func (o *GetIdentityEndpointsParams) WithContext(ctx context.Context) *GetIdentityEndpointsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get identity endpoints params
+func (o *GetIdentityEndpointsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get identity endpoints params
+func (o *GetIdentityEndpointsParams) WithHTTPClient(client *http.Client) *GetIdentityEndpointsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get identity endpoints params
+func (o *GetIdentityEndpointsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetIdentityEndpointsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_endpoints_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_endpoints_responses.go
new file mode 100644
index 000000000..0608766c8
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_endpoints_responses.go
@@ -0,0 +1,156 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package policy
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// GetIdentityEndpointsReader is a Reader for the GetIdentityEndpoints structure.
+type GetIdentityEndpointsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetIdentityEndpointsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetIdentityEndpointsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 404:
+ result := NewGetIdentityEndpointsNotFound()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetIdentityEndpointsOK creates a GetIdentityEndpointsOK with default headers values
+func NewGetIdentityEndpointsOK() *GetIdentityEndpointsOK {
+ return &GetIdentityEndpointsOK{}
+}
+
+/*
+GetIdentityEndpointsOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetIdentityEndpointsOK struct {
+ Payload []*models.IdentityEndpoints
+}
+
+// IsSuccess returns true when this get identity endpoints o k response has a 2xx status code
+func (o *GetIdentityEndpointsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get identity endpoints o k response has a 3xx status code
+func (o *GetIdentityEndpointsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get identity endpoints o k response has a 4xx status code
+func (o *GetIdentityEndpointsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get identity endpoints o k response has a 5xx status code
+func (o *GetIdentityEndpointsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get identity endpoints o k response a status code equal to that given
+func (o *GetIdentityEndpointsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetIdentityEndpointsOK) Error() string {
+ return fmt.Sprintf("[GET /identity/endpoints][%d] getIdentityEndpointsOK %+v", 200, o.Payload)
+}
+
+func (o *GetIdentityEndpointsOK) String() string {
+ return fmt.Sprintf("[GET /identity/endpoints][%d] getIdentityEndpointsOK %+v", 200, o.Payload)
+}
+
+func (o *GetIdentityEndpointsOK) GetPayload() []*models.IdentityEndpoints {
+ return o.Payload
+}
+
+func (o *GetIdentityEndpointsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetIdentityEndpointsNotFound creates a GetIdentityEndpointsNotFound with default headers values
+func NewGetIdentityEndpointsNotFound() *GetIdentityEndpointsNotFound {
+ return &GetIdentityEndpointsNotFound{}
+}
+
+/*
+GetIdentityEndpointsNotFound describes a response with status code 404, with default header values.
+
+Set of identities which are being used by local endpoints could not be found.
+*/
+type GetIdentityEndpointsNotFound struct {
+}
+
+// IsSuccess returns true when this get identity endpoints not found response has a 2xx status code
+func (o *GetIdentityEndpointsNotFound) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get identity endpoints not found response has a 3xx status code
+func (o *GetIdentityEndpointsNotFound) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get identity endpoints not found response has a 4xx status code
+func (o *GetIdentityEndpointsNotFound) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get identity endpoints not found response has a 5xx status code
+func (o *GetIdentityEndpointsNotFound) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get identity endpoints not found response a status code equal to that given
+func (o *GetIdentityEndpointsNotFound) IsCode(code int) bool {
+ return code == 404
+}
+
+func (o *GetIdentityEndpointsNotFound) Error() string {
+ return fmt.Sprintf("[GET /identity/endpoints][%d] getIdentityEndpointsNotFound ", 404)
+}
+
+func (o *GetIdentityEndpointsNotFound) String() string {
+ return fmt.Sprintf("[GET /identity/endpoints][%d] getIdentityEndpointsNotFound ", 404)
+}
+
+func (o *GetIdentityEndpointsNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_id_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_id_parameters.go
new file mode 100644
index 000000000..f17fa6651
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_id_parameters.go
@@ -0,0 +1,155 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package policy
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetIdentityIDParams creates a new GetIdentityIDParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetIdentityIDParams() *GetIdentityIDParams {
+ return &GetIdentityIDParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetIdentityIDParamsWithTimeout creates a new GetIdentityIDParams object
+// with the ability to set a timeout on a request.
+func NewGetIdentityIDParamsWithTimeout(timeout time.Duration) *GetIdentityIDParams {
+ return &GetIdentityIDParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetIdentityIDParamsWithContext creates a new GetIdentityIDParams object
+// with the ability to set a context for a request.
+func NewGetIdentityIDParamsWithContext(ctx context.Context) *GetIdentityIDParams {
+ return &GetIdentityIDParams{
+ Context: ctx,
+ }
+}
+
+// NewGetIdentityIDParamsWithHTTPClient creates a new GetIdentityIDParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetIdentityIDParamsWithHTTPClient(client *http.Client) *GetIdentityIDParams {
+ return &GetIdentityIDParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetIdentityIDParams contains all the parameters to send to the API endpoint
+
+ for the get identity ID operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetIdentityIDParams struct {
+
+ /* ID.
+
+ Cluster wide unique identifier of a security identity.
+
+ */
+ ID string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get identity ID params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetIdentityIDParams) WithDefaults() *GetIdentityIDParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get identity ID params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetIdentityIDParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get identity ID params
+func (o *GetIdentityIDParams) WithTimeout(timeout time.Duration) *GetIdentityIDParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get identity ID params
+func (o *GetIdentityIDParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get identity ID params
+func (o *GetIdentityIDParams) WithContext(ctx context.Context) *GetIdentityIDParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get identity ID params
+func (o *GetIdentityIDParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get identity ID params
+func (o *GetIdentityIDParams) WithHTTPClient(client *http.Client) *GetIdentityIDParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get identity ID params
+func (o *GetIdentityIDParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithID adds the id to the get identity ID params
+func (o *GetIdentityIDParams) WithID(id string) *GetIdentityIDParams {
+ o.SetID(id)
+ return o
+}
+
+// SetID adds the id to the get identity ID params
+func (o *GetIdentityIDParams) SetID(id string) {
+ o.ID = id
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetIdentityIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param id
+ if err := r.SetPathParam("id", o.ID); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_id_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_id_responses.go
new file mode 100644
index 000000000..c6d55d0ca
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_id_responses.go
@@ -0,0 +1,349 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package policy
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// GetIdentityIDReader is a Reader for the GetIdentityID structure.
+type GetIdentityIDReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetIdentityIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetIdentityIDOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewGetIdentityIDBadRequest()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 404:
+ result := NewGetIdentityIDNotFound()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 520:
+ result := NewGetIdentityIDUnreachable()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 521:
+ result := NewGetIdentityIDInvalidStorageFormat()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetIdentityIDOK creates a GetIdentityIDOK with default headers values
+func NewGetIdentityIDOK() *GetIdentityIDOK {
+ return &GetIdentityIDOK{}
+}
+
+/*
+GetIdentityIDOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetIdentityIDOK struct {
+ Payload *models.Identity
+}
+
+// IsSuccess returns true when this get identity Id o k response has a 2xx status code
+func (o *GetIdentityIDOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get identity Id o k response has a 3xx status code
+func (o *GetIdentityIDOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get identity Id o k response has a 4xx status code
+func (o *GetIdentityIDOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get identity Id o k response has a 5xx status code
+func (o *GetIdentityIDOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get identity Id o k response a status code equal to that given
+func (o *GetIdentityIDOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetIdentityIDOK) Error() string {
+ return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdOK %+v", 200, o.Payload)
+}
+
+func (o *GetIdentityIDOK) String() string {
+ return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdOK %+v", 200, o.Payload)
+}
+
+func (o *GetIdentityIDOK) GetPayload() *models.Identity {
+ return o.Payload
+}
+
+func (o *GetIdentityIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.Identity)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetIdentityIDBadRequest creates a GetIdentityIDBadRequest with default headers values
+func NewGetIdentityIDBadRequest() *GetIdentityIDBadRequest {
+ return &GetIdentityIDBadRequest{}
+}
+
+/*
+GetIdentityIDBadRequest describes a response with status code 400, with default header values.
+
+Invalid identity provided
+*/
+type GetIdentityIDBadRequest struct {
+}
+
+// IsSuccess returns true when this get identity Id bad request response has a 2xx status code
+func (o *GetIdentityIDBadRequest) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get identity Id bad request response has a 3xx status code
+func (o *GetIdentityIDBadRequest) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get identity Id bad request response has a 4xx status code
+func (o *GetIdentityIDBadRequest) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get identity Id bad request response has a 5xx status code
+func (o *GetIdentityIDBadRequest) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get identity Id bad request response a status code equal to that given
+func (o *GetIdentityIDBadRequest) IsCode(code int) bool {
+ return code == 400
+}
+
+func (o *GetIdentityIDBadRequest) Error() string {
+ return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdBadRequest ", 400)
+}
+
+func (o *GetIdentityIDBadRequest) String() string {
+ return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdBadRequest ", 400)
+}
+
+func (o *GetIdentityIDBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewGetIdentityIDNotFound creates a GetIdentityIDNotFound with default headers values
+func NewGetIdentityIDNotFound() *GetIdentityIDNotFound {
+ return &GetIdentityIDNotFound{}
+}
+
+/*
+GetIdentityIDNotFound describes a response with status code 404, with default header values.
+
+Identity not found
+*/
+type GetIdentityIDNotFound struct {
+}
+
+// IsSuccess returns true when this get identity Id not found response has a 2xx status code
+func (o *GetIdentityIDNotFound) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get identity Id not found response has a 3xx status code
+func (o *GetIdentityIDNotFound) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get identity Id not found response has a 4xx status code
+func (o *GetIdentityIDNotFound) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get identity Id not found response has a 5xx status code
+func (o *GetIdentityIDNotFound) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get identity Id not found response a status code equal to that given
+func (o *GetIdentityIDNotFound) IsCode(code int) bool {
+ return code == 404
+}
+
+func (o *GetIdentityIDNotFound) Error() string {
+ return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdNotFound ", 404)
+}
+
+func (o *GetIdentityIDNotFound) String() string {
+ return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdNotFound ", 404)
+}
+
+func (o *GetIdentityIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewGetIdentityIDUnreachable creates a GetIdentityIDUnreachable with default headers values
+func NewGetIdentityIDUnreachable() *GetIdentityIDUnreachable {
+ return &GetIdentityIDUnreachable{}
+}
+
+/*
+GetIdentityIDUnreachable describes a response with status code 520, with default header values.
+
+Identity storage unreachable. Likely a network problem.
+*/
+type GetIdentityIDUnreachable struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this get identity Id unreachable response has a 2xx status code
+func (o *GetIdentityIDUnreachable) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get identity Id unreachable response has a 3xx status code
+func (o *GetIdentityIDUnreachable) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get identity Id unreachable response has a 4xx status code
+func (o *GetIdentityIDUnreachable) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get identity Id unreachable response has a 5xx status code
+func (o *GetIdentityIDUnreachable) IsServerError() bool {
+ return true
+}
+
+// IsCode returns true when this get identity Id unreachable response a status code equal to that given
+func (o *GetIdentityIDUnreachable) IsCode(code int) bool {
+ return code == 520
+}
+
+func (o *GetIdentityIDUnreachable) Error() string {
+ return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdUnreachable %+v", 520, o.Payload)
+}
+
+func (o *GetIdentityIDUnreachable) String() string {
+ return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdUnreachable %+v", 520, o.Payload)
+}
+
+func (o *GetIdentityIDUnreachable) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *GetIdentityIDUnreachable) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetIdentityIDInvalidStorageFormat creates a GetIdentityIDInvalidStorageFormat with default headers values
+func NewGetIdentityIDInvalidStorageFormat() *GetIdentityIDInvalidStorageFormat {
+ return &GetIdentityIDInvalidStorageFormat{}
+}
+
+/*
+GetIdentityIDInvalidStorageFormat describes a response with status code 521, with default header values.
+
+Invalid identity format in storage
+*/
+type GetIdentityIDInvalidStorageFormat struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this get identity Id invalid storage format response has a 2xx status code
+func (o *GetIdentityIDInvalidStorageFormat) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get identity Id invalid storage format response has a 3xx status code
+func (o *GetIdentityIDInvalidStorageFormat) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get identity Id invalid storage format response has a 4xx status code
+func (o *GetIdentityIDInvalidStorageFormat) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get identity Id invalid storage format response has a 5xx status code
+func (o *GetIdentityIDInvalidStorageFormat) IsServerError() bool {
+ return true
+}
+
+// IsCode returns true when this get identity Id invalid storage format response a status code equal to that given
+func (o *GetIdentityIDInvalidStorageFormat) IsCode(code int) bool {
+ return code == 521
+}
+
+func (o *GetIdentityIDInvalidStorageFormat) Error() string {
+ return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdInvalidStorageFormat %+v", 521, o.Payload)
+}
+
+func (o *GetIdentityIDInvalidStorageFormat) String() string {
+ return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdInvalidStorageFormat %+v", 521, o.Payload)
+}
+
+func (o *GetIdentityIDInvalidStorageFormat) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *GetIdentityIDInvalidStorageFormat) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_parameters.go
new file mode 100644
index 000000000..a5a0fe298
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_parameters.go
@@ -0,0 +1,157 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package policy
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// NewGetIdentityParams creates a new GetIdentityParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetIdentityParams() *GetIdentityParams {
+ return &GetIdentityParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetIdentityParamsWithTimeout creates a new GetIdentityParams object
+// with the ability to set a timeout on a request.
+func NewGetIdentityParamsWithTimeout(timeout time.Duration) *GetIdentityParams {
+ return &GetIdentityParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetIdentityParamsWithContext creates a new GetIdentityParams object
+// with the ability to set a context for a request.
+func NewGetIdentityParamsWithContext(ctx context.Context) *GetIdentityParams {
+ return &GetIdentityParams{
+ Context: ctx,
+ }
+}
+
+// NewGetIdentityParamsWithHTTPClient creates a new GetIdentityParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetIdentityParamsWithHTTPClient(client *http.Client) *GetIdentityParams {
+ return &GetIdentityParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetIdentityParams contains all the parameters to send to the API endpoint
+
+ for the get identity operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetIdentityParams struct {
+
+ /* Labels.
+
+ List of labels
+
+ */
+ Labels models.Labels
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get identity params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetIdentityParams) WithDefaults() *GetIdentityParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get identity params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetIdentityParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get identity params
+func (o *GetIdentityParams) WithTimeout(timeout time.Duration) *GetIdentityParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get identity params
+func (o *GetIdentityParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get identity params
+func (o *GetIdentityParams) WithContext(ctx context.Context) *GetIdentityParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get identity params
+func (o *GetIdentityParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get identity params
+func (o *GetIdentityParams) WithHTTPClient(client *http.Client) *GetIdentityParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get identity params
+func (o *GetIdentityParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithLabels adds the labels to the get identity params
+func (o *GetIdentityParams) WithLabels(labels models.Labels) *GetIdentityParams {
+ o.SetLabels(labels)
+ return o
+}
+
+// SetLabels adds the labels to the get identity params
+func (o *GetIdentityParams) SetLabels(labels models.Labels) {
+ o.Labels = labels
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetIdentityParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if o.Labels != nil {
+ if err := r.SetBodyParam(o.Labels); err != nil {
+ return err
+ }
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_responses.go
new file mode 100644
index 000000000..64ea45296
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_responses.go
@@ -0,0 +1,290 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package policy
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// GetIdentityReader is a Reader for the GetIdentity structure.
+type GetIdentityReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetIdentityReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetIdentityOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 404:
+ result := NewGetIdentityNotFound()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 520:
+ result := NewGetIdentityUnreachable()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 521:
+ result := NewGetIdentityInvalidStorageFormat()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetIdentityOK creates a GetIdentityOK with default headers values
+func NewGetIdentityOK() *GetIdentityOK {
+ return &GetIdentityOK{}
+}
+
+/*
+GetIdentityOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetIdentityOK struct {
+ Payload []*models.Identity
+}
+
+// IsSuccess returns true when this get identity o k response has a 2xx status code
+func (o *GetIdentityOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get identity o k response has a 3xx status code
+func (o *GetIdentityOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get identity o k response has a 4xx status code
+func (o *GetIdentityOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get identity o k response has a 5xx status code
+func (o *GetIdentityOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get identity o k response a status code equal to that given
+func (o *GetIdentityOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetIdentityOK) Error() string {
+ return fmt.Sprintf("[GET /identity][%d] getIdentityOK %+v", 200, o.Payload)
+}
+
+func (o *GetIdentityOK) String() string {
+ return fmt.Sprintf("[GET /identity][%d] getIdentityOK %+v", 200, o.Payload)
+}
+
+func (o *GetIdentityOK) GetPayload() []*models.Identity {
+ return o.Payload
+}
+
+func (o *GetIdentityOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetIdentityNotFound creates a GetIdentityNotFound with default headers values
+func NewGetIdentityNotFound() *GetIdentityNotFound {
+ return &GetIdentityNotFound{}
+}
+
+/*
+GetIdentityNotFound describes a response with status code 404, with default header values.
+
+Identities with provided parameters not found
+*/
+type GetIdentityNotFound struct {
+}
+
+// IsSuccess returns true when this get identity not found response has a 2xx status code
+func (o *GetIdentityNotFound) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get identity not found response has a 3xx status code
+func (o *GetIdentityNotFound) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get identity not found response has a 4xx status code
+func (o *GetIdentityNotFound) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get identity not found response has a 5xx status code
+func (o *GetIdentityNotFound) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get identity not found response a status code equal to that given
+func (o *GetIdentityNotFound) IsCode(code int) bool {
+ return code == 404
+}
+
+func (o *GetIdentityNotFound) Error() string {
+ return fmt.Sprintf("[GET /identity][%d] getIdentityNotFound ", 404)
+}
+
+func (o *GetIdentityNotFound) String() string {
+ return fmt.Sprintf("[GET /identity][%d] getIdentityNotFound ", 404)
+}
+
+func (o *GetIdentityNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewGetIdentityUnreachable creates a GetIdentityUnreachable with default headers values
+func NewGetIdentityUnreachable() *GetIdentityUnreachable {
+ return &GetIdentityUnreachable{}
+}
+
+/*
+GetIdentityUnreachable describes a response with status code 520, with default header values.
+
+Identity storage unreachable. Likely a network problem.
+*/
+type GetIdentityUnreachable struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this get identity unreachable response has a 2xx status code
+func (o *GetIdentityUnreachable) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get identity unreachable response has a 3xx status code
+func (o *GetIdentityUnreachable) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get identity unreachable response has a 4xx status code
+func (o *GetIdentityUnreachable) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get identity unreachable response has a 5xx status code
+func (o *GetIdentityUnreachable) IsServerError() bool {
+ return true
+}
+
+// IsCode returns true when this get identity unreachable response a status code equal to that given
+func (o *GetIdentityUnreachable) IsCode(code int) bool {
+ return code == 520
+}
+
+func (o *GetIdentityUnreachable) Error() string {
+ return fmt.Sprintf("[GET /identity][%d] getIdentityUnreachable %+v", 520, o.Payload)
+}
+
+func (o *GetIdentityUnreachable) String() string {
+ return fmt.Sprintf("[GET /identity][%d] getIdentityUnreachable %+v", 520, o.Payload)
+}
+
+func (o *GetIdentityUnreachable) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *GetIdentityUnreachable) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetIdentityInvalidStorageFormat creates a GetIdentityInvalidStorageFormat with default headers values
+func NewGetIdentityInvalidStorageFormat() *GetIdentityInvalidStorageFormat {
+ return &GetIdentityInvalidStorageFormat{}
+}
+
+/*
+GetIdentityInvalidStorageFormat describes a response with status code 521, with default header values.
+
+Invalid identity format in storage
+*/
+type GetIdentityInvalidStorageFormat struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this get identity invalid storage format response has a 2xx status code
+func (o *GetIdentityInvalidStorageFormat) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get identity invalid storage format response has a 3xx status code
+func (o *GetIdentityInvalidStorageFormat) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get identity invalid storage format response has a 4xx status code
+func (o *GetIdentityInvalidStorageFormat) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get identity invalid storage format response has a 5xx status code
+func (o *GetIdentityInvalidStorageFormat) IsServerError() bool {
+ return true
+}
+
+// IsCode returns true when this get identity invalid storage format response a status code equal to that given
+func (o *GetIdentityInvalidStorageFormat) IsCode(code int) bool {
+ return code == 521
+}
+
+func (o *GetIdentityInvalidStorageFormat) Error() string {
+ return fmt.Sprintf("[GET /identity][%d] getIdentityInvalidStorageFormat %+v", 521, o.Payload)
+}
+
+func (o *GetIdentityInvalidStorageFormat) String() string {
+ return fmt.Sprintf("[GET /identity][%d] getIdentityInvalidStorageFormat %+v", 521, o.Payload)
+}
+
+func (o *GetIdentityInvalidStorageFormat) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *GetIdentityInvalidStorageFormat) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_ip_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_ip_parameters.go
new file mode 100644
index 000000000..2f7c23112
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_ip_parameters.go
@@ -0,0 +1,166 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package policy
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetIPParams creates a new GetIPParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetIPParams() *GetIPParams {
+ return &GetIPParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetIPParamsWithTimeout creates a new GetIPParams object
+// with the ability to set a timeout on a request.
+func NewGetIPParamsWithTimeout(timeout time.Duration) *GetIPParams {
+ return &GetIPParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetIPParamsWithContext creates a new GetIPParams object
+// with the ability to set a context for a request.
+func NewGetIPParamsWithContext(ctx context.Context) *GetIPParams {
+ return &GetIPParams{
+ Context: ctx,
+ }
+}
+
+// NewGetIPParamsWithHTTPClient creates a new GetIPParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetIPParamsWithHTTPClient(client *http.Client) *GetIPParams {
+ return &GetIPParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetIPParams contains all the parameters to send to the API endpoint
+
+ for the get IP operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetIPParams struct {
+
+ /* Cidr.
+
+ A CIDR range of IPs
+ */
+ Cidr *string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get IP params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetIPParams) WithDefaults() *GetIPParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get IP params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetIPParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get IP params
+func (o *GetIPParams) WithTimeout(timeout time.Duration) *GetIPParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get IP params
+func (o *GetIPParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get IP params
+func (o *GetIPParams) WithContext(ctx context.Context) *GetIPParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get IP params
+func (o *GetIPParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get IP params
+func (o *GetIPParams) WithHTTPClient(client *http.Client) *GetIPParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get IP params
+func (o *GetIPParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithCidr adds the cidr to the get IP params
+func (o *GetIPParams) WithCidr(cidr *string) *GetIPParams {
+ o.SetCidr(cidr)
+ return o
+}
+
+// SetCidr adds the cidr to the get IP params
+func (o *GetIPParams) SetCidr(cidr *string) {
+ o.Cidr = cidr
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetIPParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if o.Cidr != nil {
+
+ // query param cidr
+ var qrCidr string
+
+ if o.Cidr != nil {
+ qrCidr = *o.Cidr
+ }
+ qCidr := qrCidr
+ if qCidr != "" {
+
+ if err := r.SetQueryParam("cidr", qCidr); err != nil {
+ return err
+ }
+ }
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_ip_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_ip_responses.go
new file mode 100644
index 000000000..37db9ccd2
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_ip_responses.go
@@ -0,0 +1,223 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package policy
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// GetIPReader is a Reader for the GetIP structure.
+type GetIPReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetIPReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetIPOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewGetIPBadRequest()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 404:
+ result := NewGetIPNotFound()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetIPOK creates a GetIPOK with default headers values
+func NewGetIPOK() *GetIPOK {
+ return &GetIPOK{}
+}
+
+/*
+GetIPOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetIPOK struct {
+ Payload []*models.IPListEntry
+}
+
+// IsSuccess returns true when this get Ip o k response has a 2xx status code
+func (o *GetIPOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get Ip o k response has a 3xx status code
+func (o *GetIPOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get Ip o k response has a 4xx status code
+func (o *GetIPOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get Ip o k response has a 5xx status code
+func (o *GetIPOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get Ip o k response a status code equal to that given
+func (o *GetIPOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetIPOK) Error() string {
+ return fmt.Sprintf("[GET /ip][%d] getIpOK %+v", 200, o.Payload)
+}
+
+func (o *GetIPOK) String() string {
+ return fmt.Sprintf("[GET /ip][%d] getIpOK %+v", 200, o.Payload)
+}
+
+func (o *GetIPOK) GetPayload() []*models.IPListEntry {
+ return o.Payload
+}
+
+func (o *GetIPOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetIPBadRequest creates a GetIPBadRequest with default headers values
+func NewGetIPBadRequest() *GetIPBadRequest {
+ return &GetIPBadRequest{}
+}
+
+/*
+GetIPBadRequest describes a response with status code 400, with default header values.
+
+Invalid request (error parsing parameters)
+*/
+type GetIPBadRequest struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this get Ip bad request response has a 2xx status code
+func (o *GetIPBadRequest) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get Ip bad request response has a 3xx status code
+func (o *GetIPBadRequest) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get Ip bad request response has a 4xx status code
+func (o *GetIPBadRequest) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get Ip bad request response has a 5xx status code
+func (o *GetIPBadRequest) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get Ip bad request response a status code equal to that given
+func (o *GetIPBadRequest) IsCode(code int) bool {
+ return code == 400
+}
+
+func (o *GetIPBadRequest) Error() string {
+ return fmt.Sprintf("[GET /ip][%d] getIpBadRequest %+v", 400, o.Payload)
+}
+
+func (o *GetIPBadRequest) String() string {
+ return fmt.Sprintf("[GET /ip][%d] getIpBadRequest %+v", 400, o.Payload)
+}
+
+func (o *GetIPBadRequest) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *GetIPBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetIPNotFound creates a GetIPNotFound with default headers values
+func NewGetIPNotFound() *GetIPNotFound {
+ return &GetIPNotFound{}
+}
+
+/*
+GetIPNotFound describes a response with status code 404, with default header values.
+
+No IP cache entries with provided parameters found
+*/
+type GetIPNotFound struct {
+}
+
+// IsSuccess returns true when this get Ip not found response has a 2xx status code
+func (o *GetIPNotFound) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get Ip not found response has a 3xx status code
+func (o *GetIPNotFound) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get Ip not found response has a 4xx status code
+func (o *GetIPNotFound) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get Ip not found response has a 5xx status code
+func (o *GetIPNotFound) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get Ip not found response a status code equal to that given
+func (o *GetIPNotFound) IsCode(code int) bool {
+ return code == 404
+}
+
+func (o *GetIPNotFound) Error() string {
+ return fmt.Sprintf("[GET /ip][%d] getIpNotFound ", 404)
+}
+
+func (o *GetIPNotFound) String() string {
+ return fmt.Sprintf("[GET /ip][%d] getIpNotFound ", 404)
+}
+
+func (o *GetIPNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_policy_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_policy_parameters.go
new file mode 100644
index 000000000..33c66c92a
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_policy_parameters.go
@@ -0,0 +1,153 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package policy
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// NewGetPolicyParams creates a new GetPolicyParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetPolicyParams() *GetPolicyParams {
+ return &GetPolicyParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetPolicyParamsWithTimeout creates a new GetPolicyParams object
+// with the ability to set a timeout on a request.
+func NewGetPolicyParamsWithTimeout(timeout time.Duration) *GetPolicyParams {
+ return &GetPolicyParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetPolicyParamsWithContext creates a new GetPolicyParams object
+// with the ability to set a context for a request.
+func NewGetPolicyParamsWithContext(ctx context.Context) *GetPolicyParams {
+ return &GetPolicyParams{
+ Context: ctx,
+ }
+}
+
+// NewGetPolicyParamsWithHTTPClient creates a new GetPolicyParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetPolicyParamsWithHTTPClient(client *http.Client) *GetPolicyParams {
+ return &GetPolicyParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetPolicyParams contains all the parameters to send to the API endpoint
+
+ for the get policy operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetPolicyParams struct {
+
+ // Labels.
+ Labels models.Labels
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get policy params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetPolicyParams) WithDefaults() *GetPolicyParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get policy params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetPolicyParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get policy params
+func (o *GetPolicyParams) WithTimeout(timeout time.Duration) *GetPolicyParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get policy params
+func (o *GetPolicyParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get policy params
+func (o *GetPolicyParams) WithContext(ctx context.Context) *GetPolicyParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get policy params
+func (o *GetPolicyParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get policy params
+func (o *GetPolicyParams) WithHTTPClient(client *http.Client) *GetPolicyParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get policy params
+func (o *GetPolicyParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithLabels adds the labels to the get policy params
+func (o *GetPolicyParams) WithLabels(labels models.Labels) *GetPolicyParams {
+ o.SetLabels(labels)
+ return o
+}
+
+// SetLabels adds the labels to the get policy params
+func (o *GetPolicyParams) SetLabels(labels models.Labels) {
+ o.Labels = labels
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetPolicyParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if o.Labels != nil {
+ if err := r.SetBodyParam(o.Labels); err != nil {
+ return err
+ }
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_policy_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_policy_responses.go
new file mode 100644
index 000000000..cba45439d
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_policy_responses.go
@@ -0,0 +1,158 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package policy
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// GetPolicyReader is a Reader for the GetPolicy structure.
+type GetPolicyReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetPolicyReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetPolicyOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 404:
+ result := NewGetPolicyNotFound()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetPolicyOK creates a GetPolicyOK with default headers values
+func NewGetPolicyOK() *GetPolicyOK {
+ return &GetPolicyOK{}
+}
+
+/*
+GetPolicyOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetPolicyOK struct {
+ Payload *models.Policy
+}
+
+// IsSuccess returns true when this get policy o k response has a 2xx status code
+func (o *GetPolicyOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get policy o k response has a 3xx status code
+func (o *GetPolicyOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get policy o k response has a 4xx status code
+func (o *GetPolicyOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get policy o k response has a 5xx status code
+func (o *GetPolicyOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get policy o k response a status code equal to that given
+func (o *GetPolicyOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetPolicyOK) Error() string {
+ return fmt.Sprintf("[GET /policy][%d] getPolicyOK %+v", 200, o.Payload)
+}
+
+func (o *GetPolicyOK) String() string {
+ return fmt.Sprintf("[GET /policy][%d] getPolicyOK %+v", 200, o.Payload)
+}
+
+func (o *GetPolicyOK) GetPayload() *models.Policy {
+ return o.Payload
+}
+
+func (o *GetPolicyOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.Policy)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetPolicyNotFound creates a GetPolicyNotFound with default headers values
+func NewGetPolicyNotFound() *GetPolicyNotFound {
+ return &GetPolicyNotFound{}
+}
+
+/*
+GetPolicyNotFound describes a response with status code 404, with default header values.
+
+No policy rules found
+*/
+type GetPolicyNotFound struct {
+}
+
+// IsSuccess returns true when this get policy not found response has a 2xx status code
+func (o *GetPolicyNotFound) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get policy not found response has a 3xx status code
+func (o *GetPolicyNotFound) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get policy not found response has a 4xx status code
+func (o *GetPolicyNotFound) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get policy not found response has a 5xx status code
+func (o *GetPolicyNotFound) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get policy not found response a status code equal to that given
+func (o *GetPolicyNotFound) IsCode(code int) bool {
+ return code == 404
+}
+
+func (o *GetPolicyNotFound) Error() string {
+ return fmt.Sprintf("[GET /policy][%d] getPolicyNotFound ", 404)
+}
+
+func (o *GetPolicyNotFound) String() string {
+ return fmt.Sprintf("[GET /policy][%d] getPolicyNotFound ", 404)
+}
+
+func (o *GetPolicyNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_policy_selectors_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_policy_selectors_parameters.go
new file mode 100644
index 000000000..388f81f59
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_policy_selectors_parameters.go
@@ -0,0 +1,131 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package policy
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetPolicySelectorsParams creates a new GetPolicySelectorsParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetPolicySelectorsParams() *GetPolicySelectorsParams {
+ return &GetPolicySelectorsParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetPolicySelectorsParamsWithTimeout creates a new GetPolicySelectorsParams object
+// with the ability to set a timeout on a request.
+func NewGetPolicySelectorsParamsWithTimeout(timeout time.Duration) *GetPolicySelectorsParams {
+ return &GetPolicySelectorsParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetPolicySelectorsParamsWithContext creates a new GetPolicySelectorsParams object
+// with the ability to set a context for a request.
+func NewGetPolicySelectorsParamsWithContext(ctx context.Context) *GetPolicySelectorsParams {
+ return &GetPolicySelectorsParams{
+ Context: ctx,
+ }
+}
+
+// NewGetPolicySelectorsParamsWithHTTPClient creates a new GetPolicySelectorsParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetPolicySelectorsParamsWithHTTPClient(client *http.Client) *GetPolicySelectorsParams {
+ return &GetPolicySelectorsParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetPolicySelectorsParams contains all the parameters to send to the API endpoint
+
+ for the get policy selectors operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetPolicySelectorsParams struct {
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get policy selectors params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetPolicySelectorsParams) WithDefaults() *GetPolicySelectorsParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get policy selectors params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetPolicySelectorsParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get policy selectors params
+func (o *GetPolicySelectorsParams) WithTimeout(timeout time.Duration) *GetPolicySelectorsParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get policy selectors params
+func (o *GetPolicySelectorsParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get policy selectors params
+func (o *GetPolicySelectorsParams) WithContext(ctx context.Context) *GetPolicySelectorsParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get policy selectors params
+func (o *GetPolicySelectorsParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get policy selectors params
+func (o *GetPolicySelectorsParams) WithHTTPClient(client *http.Client) *GetPolicySelectorsParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get policy selectors params
+func (o *GetPolicySelectorsParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetPolicySelectorsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_policy_selectors_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_policy_selectors_responses.go
new file mode 100644
index 000000000..a1cbec4a3
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_policy_selectors_responses.go
@@ -0,0 +1,99 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package policy
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// GetPolicySelectorsReader is a Reader for the GetPolicySelectors structure.
+type GetPolicySelectorsReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetPolicySelectorsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetPolicySelectorsOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetPolicySelectorsOK creates a GetPolicySelectorsOK with default headers values
+func NewGetPolicySelectorsOK() *GetPolicySelectorsOK {
+ return &GetPolicySelectorsOK{}
+}
+
+/*
+GetPolicySelectorsOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetPolicySelectorsOK struct {
+ Payload models.SelectorCache
+}
+
+// IsSuccess returns true when this get policy selectors o k response has a 2xx status code
+func (o *GetPolicySelectorsOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get policy selectors o k response has a 3xx status code
+func (o *GetPolicySelectorsOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get policy selectors o k response has a 4xx status code
+func (o *GetPolicySelectorsOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get policy selectors o k response has a 5xx status code
+func (o *GetPolicySelectorsOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get policy selectors o k response a status code equal to that given
+func (o *GetPolicySelectorsOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetPolicySelectorsOK) Error() string {
+ return fmt.Sprintf("[GET /policy/selectors][%d] getPolicySelectorsOK %+v", 200, o.Payload)
+}
+
+func (o *GetPolicySelectorsOK) String() string {
+ return fmt.Sprintf("[GET /policy/selectors][%d] getPolicySelectorsOK %+v", 200, o.Payload)
+}
+
+func (o *GetPolicySelectorsOK) GetPayload() models.SelectorCache {
+ return o.Payload
+}
+
+func (o *GetPolicySelectorsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/policy_client.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/policy_client.go
new file mode 100644
index 000000000..c72927034
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/policy_client.go
@@ -0,0 +1,548 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package policy
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+)
+
+// New creates a new policy API client.
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
+ return &Client{transport: transport, formats: formats}
+}
+
+/*
+Client for policy API
+*/
+type Client struct {
+ transport runtime.ClientTransport
+ formats strfmt.Registry
+}
+
+// ClientOption is the option for Client methods
+type ClientOption func(*runtime.ClientOperation)
+
+// ClientService is the interface for Client methods
+type ClientService interface {
+ DeleteFqdnCache(params *DeleteFqdnCacheParams, opts ...ClientOption) (*DeleteFqdnCacheOK, error)
+
+ DeletePolicy(params *DeletePolicyParams, opts ...ClientOption) (*DeletePolicyOK, error)
+
+ GetFqdnCache(params *GetFqdnCacheParams, opts ...ClientOption) (*GetFqdnCacheOK, error)
+
+ GetFqdnCacheID(params *GetFqdnCacheIDParams, opts ...ClientOption) (*GetFqdnCacheIDOK, error)
+
+ GetFqdnNames(params *GetFqdnNamesParams, opts ...ClientOption) (*GetFqdnNamesOK, error)
+
+ GetIP(params *GetIPParams, opts ...ClientOption) (*GetIPOK, error)
+
+ GetIdentity(params *GetIdentityParams, opts ...ClientOption) (*GetIdentityOK, error)
+
+ GetIdentityEndpoints(params *GetIdentityEndpointsParams, opts ...ClientOption) (*GetIdentityEndpointsOK, error)
+
+ GetIdentityID(params *GetIdentityIDParams, opts ...ClientOption) (*GetIdentityIDOK, error)
+
+ GetPolicy(params *GetPolicyParams, opts ...ClientOption) (*GetPolicyOK, error)
+
+ GetPolicySelectors(params *GetPolicySelectorsParams, opts ...ClientOption) (*GetPolicySelectorsOK, error)
+
+ PutPolicy(params *PutPolicyParams, opts ...ClientOption) (*PutPolicyOK, error)
+
+ SetTransport(transport runtime.ClientTransport)
+}
+
+/*
+ DeleteFqdnCache deletes matching DNS lookups from the policy generation cache
+
+ Deletes matching DNS lookups from the cache, optionally restricted by
+
+DNS name. The removed IP data will no longer be used in generated
+policies.
+*/
+func (a *Client) DeleteFqdnCache(params *DeleteFqdnCacheParams, opts ...ClientOption) (*DeleteFqdnCacheOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewDeleteFqdnCacheParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "DeleteFqdnCache",
+ Method: "DELETE",
+ PathPattern: "/fqdn/cache",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &DeleteFqdnCacheReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*DeleteFqdnCacheOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for DeleteFqdnCache: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+DeletePolicy deletes a policy sub tree
+*/
+func (a *Client) DeletePolicy(params *DeletePolicyParams, opts ...ClientOption) (*DeletePolicyOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewDeletePolicyParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "DeletePolicy",
+ Method: "DELETE",
+ PathPattern: "/policy",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &DeletePolicyReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*DeletePolicyOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for DeletePolicy: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+ GetFqdnCache retrieves the list of DNS lookups intercepted from all endpoints
+
+ Retrieves the list of DNS lookups intercepted from endpoints,
+
+optionally filtered by DNS name, CIDR IP range or source.
+*/
+func (a *Client) GetFqdnCache(params *GetFqdnCacheParams, opts ...ClientOption) (*GetFqdnCacheOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetFqdnCacheParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetFqdnCache",
+ Method: "GET",
+ PathPattern: "/fqdn/cache",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetFqdnCacheReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetFqdnCacheOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetFqdnCache: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+ GetFqdnCacheID retrieves the list of DNS lookups intercepted from an endpoint
+
+ Retrieves the list of DNS lookups intercepted from the specific endpoint,
+
+optionally filtered by endpoint id, DNS name, CIDR IP range or source.
+*/
+func (a *Client) GetFqdnCacheID(params *GetFqdnCacheIDParams, opts ...ClientOption) (*GetFqdnCacheIDOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetFqdnCacheIDParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetFqdnCacheID",
+ Method: "GET",
+ PathPattern: "/fqdn/cache/{id}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetFqdnCacheIDReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetFqdnCacheIDOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetFqdnCacheID: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+ GetFqdnNames lists internal DNS selector representations
+
+ Retrieves the list of DNS-related fields (names to poll, selectors and
+
+their corresponding regexes).
+*/
+func (a *Client) GetFqdnNames(params *GetFqdnNamesParams, opts ...ClientOption) (*GetFqdnNamesOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetFqdnNamesParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetFqdnNames",
+ Method: "GET",
+ PathPattern: "/fqdn/names",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetFqdnNamesReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetFqdnNamesOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetFqdnNames: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+ GetIP lists information about known IP addresses
+
+ Retrieves a list of IPs with known associated information such as
+
+their identities, host addresses, Kubernetes pod names, etc.
+The list can optionally filtered by a CIDR IP range.
+*/
+func (a *Client) GetIP(params *GetIPParams, opts ...ClientOption) (*GetIPOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetIPParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetIP",
+ Method: "GET",
+ PathPattern: "/ip",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetIPReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetIPOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetIP: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+GetIdentity retrieves a list of identities that have metadata matching the provided parameters
+
+Retrieves a list of identities that have metadata matching the provided parameters, or all identities if no parameters are provided.
+*/
+func (a *Client) GetIdentity(params *GetIdentityParams, opts ...ClientOption) (*GetIdentityOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetIdentityParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetIdentity",
+ Method: "GET",
+ PathPattern: "/identity",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetIdentityReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetIdentityOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetIdentity: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+GetIdentityEndpoints retrieves identities which are being used by local endpoints
+*/
+func (a *Client) GetIdentityEndpoints(params *GetIdentityEndpointsParams, opts ...ClientOption) (*GetIdentityEndpointsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetIdentityEndpointsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetIdentityEndpoints",
+ Method: "GET",
+ PathPattern: "/identity/endpoints",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetIdentityEndpointsReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetIdentityEndpointsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetIdentityEndpoints: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+GetIdentityID retrieves identity
+*/
+func (a *Client) GetIdentityID(params *GetIdentityIDParams, opts ...ClientOption) (*GetIdentityIDOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetIdentityIDParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetIdentityID",
+ Method: "GET",
+ PathPattern: "/identity/{id}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetIdentityIDReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetIdentityIDOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetIdentityID: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+GetPolicy retrieves entire policy tree
+
+Returns the entire policy tree with all children.
+*/
+func (a *Client) GetPolicy(params *GetPolicyParams, opts ...ClientOption) (*GetPolicyOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetPolicyParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetPolicy",
+ Method: "GET",
+ PathPattern: "/policy",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetPolicyReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetPolicyOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetPolicy: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+GetPolicySelectors sees what selectors match which identities
+*/
+func (a *Client) GetPolicySelectors(params *GetPolicySelectorsParams, opts ...ClientOption) (*GetPolicySelectorsOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetPolicySelectorsParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetPolicySelectors",
+ Method: "GET",
+ PathPattern: "/policy/selectors",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetPolicySelectorsReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetPolicySelectorsOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetPolicySelectors: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+PutPolicy creates or update a policy sub tree
+*/
+func (a *Client) PutPolicy(params *PutPolicyParams, opts ...ClientOption) (*PutPolicyOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewPutPolicyParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "PutPolicy",
+ Method: "PUT",
+ PathPattern: "/policy",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &PutPolicyReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*PutPolicyOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for PutPolicy: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+ a.transport = transport
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/put_policy_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/put_policy_parameters.go
new file mode 100644
index 000000000..ef09ec796
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/put_policy_parameters.go
@@ -0,0 +1,232 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package policy
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// NewPutPolicyParams creates a new PutPolicyParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewPutPolicyParams() *PutPolicyParams {
+ return &PutPolicyParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewPutPolicyParamsWithTimeout creates a new PutPolicyParams object
+// with the ability to set a timeout on a request.
+func NewPutPolicyParamsWithTimeout(timeout time.Duration) *PutPolicyParams {
+ return &PutPolicyParams{
+ timeout: timeout,
+ }
+}
+
+// NewPutPolicyParamsWithContext creates a new PutPolicyParams object
+// with the ability to set a context for a request.
+func NewPutPolicyParamsWithContext(ctx context.Context) *PutPolicyParams {
+ return &PutPolicyParams{
+ Context: ctx,
+ }
+}
+
+// NewPutPolicyParamsWithHTTPClient creates a new PutPolicyParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewPutPolicyParamsWithHTTPClient(client *http.Client) *PutPolicyParams {
+ return &PutPolicyParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+PutPolicyParams contains all the parameters to send to the API endpoint
+
+ for the put policy operation.
+
+ Typically these are written to a http.Request.
+*/
+type PutPolicyParams struct {
+
+ /* Policy.
+
+ Policy rules
+ */
+ Policy string
+
+ /* Replace.
+
+ If true, indicates that existing rules with identical labels should be replaced.
+ */
+ Replace *bool
+
+ /* ReplaceWithLabels.
+
+ If present, indicates that existing rules with the given labels should be deleted.
+ */
+ ReplaceWithLabels []string
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the put policy params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *PutPolicyParams) WithDefaults() *PutPolicyParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the put policy params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *PutPolicyParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the put policy params
+func (o *PutPolicyParams) WithTimeout(timeout time.Duration) *PutPolicyParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the put policy params
+func (o *PutPolicyParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the put policy params
+func (o *PutPolicyParams) WithContext(ctx context.Context) *PutPolicyParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the put policy params
+func (o *PutPolicyParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the put policy params
+func (o *PutPolicyParams) WithHTTPClient(client *http.Client) *PutPolicyParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the put policy params
+func (o *PutPolicyParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithPolicy adds the policy to the put policy params
+func (o *PutPolicyParams) WithPolicy(policy string) *PutPolicyParams {
+ o.SetPolicy(policy)
+ return o
+}
+
+// SetPolicy adds the policy to the put policy params
+func (o *PutPolicyParams) SetPolicy(policy string) {
+ o.Policy = policy
+}
+
+// WithReplace adds the replace to the put policy params
+func (o *PutPolicyParams) WithReplace(replace *bool) *PutPolicyParams {
+ o.SetReplace(replace)
+ return o
+}
+
+// SetReplace adds the replace to the put policy params
+func (o *PutPolicyParams) SetReplace(replace *bool) {
+ o.Replace = replace
+}
+
+// WithReplaceWithLabels adds the replaceWithLabels to the put policy params
+func (o *PutPolicyParams) WithReplaceWithLabels(replaceWithLabels []string) *PutPolicyParams {
+ o.SetReplaceWithLabels(replaceWithLabels)
+ return o
+}
+
+// SetReplaceWithLabels adds the replaceWithLabels to the put policy params
+func (o *PutPolicyParams) SetReplaceWithLabels(replaceWithLabels []string) {
+ o.ReplaceWithLabels = replaceWithLabels
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *PutPolicyParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if err := r.SetBodyParam(o.Policy); err != nil {
+ return err
+ }
+
+ if o.Replace != nil {
+
+ // query param replace
+ var qrReplace bool
+
+ if o.Replace != nil {
+ qrReplace = *o.Replace
+ }
+ qReplace := swag.FormatBool(qrReplace)
+ if qReplace != "" {
+
+ if err := r.SetQueryParam("replace", qReplace); err != nil {
+ return err
+ }
+ }
+ }
+
+ if o.ReplaceWithLabels != nil {
+
+ // binding items for replace-with-labels
+ joinedReplaceWithLabels := o.bindParamReplaceWithLabels(reg)
+
+ // query array param replace-with-labels
+ if err := r.SetQueryParam("replace-with-labels", joinedReplaceWithLabels...); err != nil {
+ return err
+ }
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+// bindParamReplaceWithLabels binds the parameter replace-with-labels
+func (o *PutPolicyParams) bindParamReplaceWithLabels(formats strfmt.Registry) []string {
+ replaceWithLabelsIR := o.ReplaceWithLabels
+
+ var replaceWithLabelsIC []string
+ for _, replaceWithLabelsIIR := range replaceWithLabelsIR { // explode []string
+
+ replaceWithLabelsIIV := replaceWithLabelsIIR // string as string
+ replaceWithLabelsIC = append(replaceWithLabelsIC, replaceWithLabelsIIV)
+ }
+
+ // items.CollectionFormat: ""
+ replaceWithLabelsIS := swag.JoinByFormat(replaceWithLabelsIC, "")
+
+ return replaceWithLabelsIS
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/put_policy_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/put_policy_responses.go
new file mode 100644
index 000000000..f1104c418
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/put_policy_responses.go
@@ -0,0 +1,359 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package policy
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// PutPolicyReader is a Reader for the PutPolicy structure.
+type PutPolicyReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *PutPolicyReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewPutPolicyOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 400:
+ result := NewPutPolicyInvalidPolicy()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 403:
+ result := NewPutPolicyForbidden()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 460:
+ result := NewPutPolicyInvalidPath()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 500:
+ result := NewPutPolicyFailure()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewPutPolicyOK creates a PutPolicyOK with default headers values
+func NewPutPolicyOK() *PutPolicyOK {
+ return &PutPolicyOK{}
+}
+
+/*
+PutPolicyOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type PutPolicyOK struct {
+ Payload *models.Policy
+}
+
+// IsSuccess returns true when this put policy o k response has a 2xx status code
+func (o *PutPolicyOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this put policy o k response has a 3xx status code
+func (o *PutPolicyOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this put policy o k response has a 4xx status code
+func (o *PutPolicyOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this put policy o k response has a 5xx status code
+func (o *PutPolicyOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this put policy o k response a status code equal to that given
+func (o *PutPolicyOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *PutPolicyOK) Error() string {
+ return fmt.Sprintf("[PUT /policy][%d] putPolicyOK %+v", 200, o.Payload)
+}
+
+func (o *PutPolicyOK) String() string {
+ return fmt.Sprintf("[PUT /policy][%d] putPolicyOK %+v", 200, o.Payload)
+}
+
+func (o *PutPolicyOK) GetPayload() *models.Policy {
+ return o.Payload
+}
+
+func (o *PutPolicyOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.Policy)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewPutPolicyInvalidPolicy creates a PutPolicyInvalidPolicy with default headers values
+func NewPutPolicyInvalidPolicy() *PutPolicyInvalidPolicy {
+ return &PutPolicyInvalidPolicy{}
+}
+
+/*
+PutPolicyInvalidPolicy describes a response with status code 400, with default header values.
+
+Invalid policy
+*/
+type PutPolicyInvalidPolicy struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this put policy invalid policy response has a 2xx status code
+func (o *PutPolicyInvalidPolicy) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this put policy invalid policy response has a 3xx status code
+func (o *PutPolicyInvalidPolicy) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this put policy invalid policy response has a 4xx status code
+func (o *PutPolicyInvalidPolicy) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this put policy invalid policy response has a 5xx status code
+func (o *PutPolicyInvalidPolicy) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this put policy invalid policy response a status code equal to that given
+func (o *PutPolicyInvalidPolicy) IsCode(code int) bool {
+ return code == 400
+}
+
+func (o *PutPolicyInvalidPolicy) Error() string {
+ return fmt.Sprintf("[PUT /policy][%d] putPolicyInvalidPolicy %+v", 400, o.Payload)
+}
+
+func (o *PutPolicyInvalidPolicy) String() string {
+ return fmt.Sprintf("[PUT /policy][%d] putPolicyInvalidPolicy %+v", 400, o.Payload)
+}
+
+func (o *PutPolicyInvalidPolicy) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *PutPolicyInvalidPolicy) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewPutPolicyForbidden creates a PutPolicyForbidden with default headers values
+func NewPutPolicyForbidden() *PutPolicyForbidden {
+ return &PutPolicyForbidden{}
+}
+
+/*
+PutPolicyForbidden describes a response with status code 403, with default header values.
+
+Forbidden
+*/
+type PutPolicyForbidden struct {
+}
+
+// IsSuccess returns true when this put policy forbidden response has a 2xx status code
+func (o *PutPolicyForbidden) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this put policy forbidden response has a 3xx status code
+func (o *PutPolicyForbidden) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this put policy forbidden response has a 4xx status code
+func (o *PutPolicyForbidden) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this put policy forbidden response has a 5xx status code
+func (o *PutPolicyForbidden) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this put policy forbidden response a status code equal to that given
+func (o *PutPolicyForbidden) IsCode(code int) bool {
+ return code == 403
+}
+
+func (o *PutPolicyForbidden) Error() string {
+ return fmt.Sprintf("[PUT /policy][%d] putPolicyForbidden ", 403)
+}
+
+func (o *PutPolicyForbidden) String() string {
+ return fmt.Sprintf("[PUT /policy][%d] putPolicyForbidden ", 403)
+}
+
+func (o *PutPolicyForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewPutPolicyInvalidPath creates a PutPolicyInvalidPath with default headers values
+func NewPutPolicyInvalidPath() *PutPolicyInvalidPath {
+ return &PutPolicyInvalidPath{}
+}
+
+/*
+PutPolicyInvalidPath describes a response with status code 460, with default header values.
+
+Invalid path
+*/
+type PutPolicyInvalidPath struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this put policy invalid path response has a 2xx status code
+func (o *PutPolicyInvalidPath) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this put policy invalid path response has a 3xx status code
+func (o *PutPolicyInvalidPath) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this put policy invalid path response has a 4xx status code
+func (o *PutPolicyInvalidPath) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this put policy invalid path response has a 5xx status code
+func (o *PutPolicyInvalidPath) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this put policy invalid path response a status code equal to that given
+func (o *PutPolicyInvalidPath) IsCode(code int) bool {
+ return code == 460
+}
+
+func (o *PutPolicyInvalidPath) Error() string {
+ return fmt.Sprintf("[PUT /policy][%d] putPolicyInvalidPath %+v", 460, o.Payload)
+}
+
+func (o *PutPolicyInvalidPath) String() string {
+ return fmt.Sprintf("[PUT /policy][%d] putPolicyInvalidPath %+v", 460, o.Payload)
+}
+
+func (o *PutPolicyInvalidPath) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *PutPolicyInvalidPath) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewPutPolicyFailure creates a PutPolicyFailure with default headers values
+func NewPutPolicyFailure() *PutPolicyFailure {
+ return &PutPolicyFailure{}
+}
+
+/*
+PutPolicyFailure describes a response with status code 500, with default header values.
+
+Policy import failed
+*/
+type PutPolicyFailure struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this put policy failure response has a 2xx status code
+func (o *PutPolicyFailure) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this put policy failure response has a 3xx status code
+func (o *PutPolicyFailure) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this put policy failure response has a 4xx status code
+func (o *PutPolicyFailure) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this put policy failure response has a 5xx status code
+func (o *PutPolicyFailure) IsServerError() bool {
+ return true
+}
+
+// IsCode returns true when this put policy failure response a status code equal to that given
+func (o *PutPolicyFailure) IsCode(code int) bool {
+ return code == 500
+}
+
+func (o *PutPolicyFailure) Error() string {
+ return fmt.Sprintf("[PUT /policy][%d] putPolicyFailure %+v", 500, o.Payload)
+}
+
+func (o *PutPolicyFailure) String() string {
+ return fmt.Sprintf("[PUT /policy][%d] putPolicyFailure %+v", 500, o.Payload)
+}
+
+func (o *PutPolicyFailure) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *PutPolicyFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/prefilter/delete_prefilter_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/prefilter/delete_prefilter_parameters.go
new file mode 100644
index 000000000..889f2d2e9
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/prefilter/delete_prefilter_parameters.go
@@ -0,0 +1,156 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package prefilter
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// NewDeletePrefilterParams creates a new DeletePrefilterParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewDeletePrefilterParams() *DeletePrefilterParams {
+ return &DeletePrefilterParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewDeletePrefilterParamsWithTimeout creates a new DeletePrefilterParams object
+// with the ability to set a timeout on a request.
+func NewDeletePrefilterParamsWithTimeout(timeout time.Duration) *DeletePrefilterParams {
+ return &DeletePrefilterParams{
+ timeout: timeout,
+ }
+}
+
+// NewDeletePrefilterParamsWithContext creates a new DeletePrefilterParams object
+// with the ability to set a context for a request.
+func NewDeletePrefilterParamsWithContext(ctx context.Context) *DeletePrefilterParams {
+ return &DeletePrefilterParams{
+ Context: ctx,
+ }
+}
+
+// NewDeletePrefilterParamsWithHTTPClient creates a new DeletePrefilterParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewDeletePrefilterParamsWithHTTPClient(client *http.Client) *DeletePrefilterParams {
+ return &DeletePrefilterParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+DeletePrefilterParams contains all the parameters to send to the API endpoint
+
+ for the delete prefilter operation.
+
+ Typically these are written to a http.Request.
+*/
+type DeletePrefilterParams struct {
+
+ /* PrefilterSpec.
+
+ List of CIDR ranges for filter table
+ */
+ PrefilterSpec *models.PrefilterSpec
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the delete prefilter params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeletePrefilterParams) WithDefaults() *DeletePrefilterParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the delete prefilter params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeletePrefilterParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the delete prefilter params
+func (o *DeletePrefilterParams) WithTimeout(timeout time.Duration) *DeletePrefilterParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the delete prefilter params
+func (o *DeletePrefilterParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the delete prefilter params
+func (o *DeletePrefilterParams) WithContext(ctx context.Context) *DeletePrefilterParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the delete prefilter params
+func (o *DeletePrefilterParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the delete prefilter params
+func (o *DeletePrefilterParams) WithHTTPClient(client *http.Client) *DeletePrefilterParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the delete prefilter params
+func (o *DeletePrefilterParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithPrefilterSpec adds the prefilterSpec to the delete prefilter params
+func (o *DeletePrefilterParams) WithPrefilterSpec(prefilterSpec *models.PrefilterSpec) *DeletePrefilterParams {
+ o.SetPrefilterSpec(prefilterSpec)
+ return o
+}
+
+// SetPrefilterSpec adds the prefilterSpec to the delete prefilter params
+func (o *DeletePrefilterParams) SetPrefilterSpec(prefilterSpec *models.PrefilterSpec) {
+ o.PrefilterSpec = prefilterSpec
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *DeletePrefilterParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if o.PrefilterSpec != nil {
+ if err := r.SetBodyParam(o.PrefilterSpec); err != nil {
+ return err
+ }
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/prefilter/delete_prefilter_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/prefilter/delete_prefilter_responses.go
new file mode 100644
index 000000000..0d5aea637
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/prefilter/delete_prefilter_responses.go
@@ -0,0 +1,292 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package prefilter
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// DeletePrefilterReader is a Reader for the DeletePrefilter structure.
+type DeletePrefilterReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *DeletePrefilterReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewDeletePrefilterOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 403:
+ result := NewDeletePrefilterForbidden()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 461:
+ result := NewDeletePrefilterInvalidCIDR()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 500:
+ result := NewDeletePrefilterFailure()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewDeletePrefilterOK creates a DeletePrefilterOK with default headers values
+func NewDeletePrefilterOK() *DeletePrefilterOK {
+ return &DeletePrefilterOK{}
+}
+
+/*
+DeletePrefilterOK describes a response with status code 200, with default header values.
+
+Deleted
+*/
+type DeletePrefilterOK struct {
+ Payload *models.Prefilter
+}
+
+// IsSuccess returns true when this delete prefilter o k response has a 2xx status code
+func (o *DeletePrefilterOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this delete prefilter o k response has a 3xx status code
+func (o *DeletePrefilterOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete prefilter o k response has a 4xx status code
+func (o *DeletePrefilterOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this delete prefilter o k response has a 5xx status code
+func (o *DeletePrefilterOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this delete prefilter o k response a status code equal to that given
+func (o *DeletePrefilterOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *DeletePrefilterOK) Error() string {
+ return fmt.Sprintf("[DELETE /prefilter][%d] deletePrefilterOK %+v", 200, o.Payload)
+}
+
+func (o *DeletePrefilterOK) String() string {
+ return fmt.Sprintf("[DELETE /prefilter][%d] deletePrefilterOK %+v", 200, o.Payload)
+}
+
+func (o *DeletePrefilterOK) GetPayload() *models.Prefilter {
+ return o.Payload
+}
+
+func (o *DeletePrefilterOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.Prefilter)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewDeletePrefilterForbidden creates a DeletePrefilterForbidden with default headers values
+func NewDeletePrefilterForbidden() *DeletePrefilterForbidden {
+ return &DeletePrefilterForbidden{}
+}
+
+/*
+DeletePrefilterForbidden describes a response with status code 403, with default header values.
+
+Forbidden
+*/
+type DeletePrefilterForbidden struct {
+}
+
+// IsSuccess returns true when this delete prefilter forbidden response has a 2xx status code
+func (o *DeletePrefilterForbidden) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this delete prefilter forbidden response has a 3xx status code
+func (o *DeletePrefilterForbidden) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete prefilter forbidden response has a 4xx status code
+func (o *DeletePrefilterForbidden) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this delete prefilter forbidden response has a 5xx status code
+func (o *DeletePrefilterForbidden) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this delete prefilter forbidden response a status code equal to that given
+func (o *DeletePrefilterForbidden) IsCode(code int) bool {
+ return code == 403
+}
+
+func (o *DeletePrefilterForbidden) Error() string {
+ return fmt.Sprintf("[DELETE /prefilter][%d] deletePrefilterForbidden ", 403)
+}
+
+func (o *DeletePrefilterForbidden) String() string {
+ return fmt.Sprintf("[DELETE /prefilter][%d] deletePrefilterForbidden ", 403)
+}
+
+func (o *DeletePrefilterForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewDeletePrefilterInvalidCIDR creates a DeletePrefilterInvalidCIDR with default headers values
+func NewDeletePrefilterInvalidCIDR() *DeletePrefilterInvalidCIDR {
+ return &DeletePrefilterInvalidCIDR{}
+}
+
+/*
+DeletePrefilterInvalidCIDR describes a response with status code 461, with default header values.
+
+Invalid CIDR prefix
+*/
+type DeletePrefilterInvalidCIDR struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this delete prefilter invalid c Id r response has a 2xx status code
+func (o *DeletePrefilterInvalidCIDR) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this delete prefilter invalid c Id r response has a 3xx status code
+func (o *DeletePrefilterInvalidCIDR) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete prefilter invalid c Id r response has a 4xx status code
+func (o *DeletePrefilterInvalidCIDR) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this delete prefilter invalid c Id r response has a 5xx status code
+func (o *DeletePrefilterInvalidCIDR) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this delete prefilter invalid c Id r response a status code equal to that given
+func (o *DeletePrefilterInvalidCIDR) IsCode(code int) bool {
+ return code == 461
+}
+
+func (o *DeletePrefilterInvalidCIDR) Error() string {
+ return fmt.Sprintf("[DELETE /prefilter][%d] deletePrefilterInvalidCIdR %+v", 461, o.Payload)
+}
+
+func (o *DeletePrefilterInvalidCIDR) String() string {
+ return fmt.Sprintf("[DELETE /prefilter][%d] deletePrefilterInvalidCIdR %+v", 461, o.Payload)
+}
+
+func (o *DeletePrefilterInvalidCIDR) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *DeletePrefilterInvalidCIDR) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewDeletePrefilterFailure creates a DeletePrefilterFailure with default headers values
+func NewDeletePrefilterFailure() *DeletePrefilterFailure {
+ return &DeletePrefilterFailure{}
+}
+
+/*
+DeletePrefilterFailure describes a response with status code 500, with default header values.
+
+Prefilter delete failed
+*/
+type DeletePrefilterFailure struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this delete prefilter failure response has a 2xx status code
+func (o *DeletePrefilterFailure) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this delete prefilter failure response has a 3xx status code
+func (o *DeletePrefilterFailure) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete prefilter failure response has a 4xx status code
+func (o *DeletePrefilterFailure) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this delete prefilter failure response has a 5xx status code
+func (o *DeletePrefilterFailure) IsServerError() bool {
+ return true
+}
+
+// IsCode returns true when this delete prefilter failure response a status code equal to that given
+func (o *DeletePrefilterFailure) IsCode(code int) bool {
+ return code == 500
+}
+
+func (o *DeletePrefilterFailure) Error() string {
+ return fmt.Sprintf("[DELETE /prefilter][%d] deletePrefilterFailure %+v", 500, o.Payload)
+}
+
+func (o *DeletePrefilterFailure) String() string {
+ return fmt.Sprintf("[DELETE /prefilter][%d] deletePrefilterFailure %+v", 500, o.Payload)
+}
+
+func (o *DeletePrefilterFailure) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *DeletePrefilterFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/prefilter/get_prefilter_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/prefilter/get_prefilter_parameters.go
new file mode 100644
index 000000000..40c5e7bfa
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/prefilter/get_prefilter_parameters.go
@@ -0,0 +1,131 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package prefilter
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetPrefilterParams creates a new GetPrefilterParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetPrefilterParams() *GetPrefilterParams {
+ return &GetPrefilterParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetPrefilterParamsWithTimeout creates a new GetPrefilterParams object
+// with the ability to set a timeout on a request.
+func NewGetPrefilterParamsWithTimeout(timeout time.Duration) *GetPrefilterParams {
+ return &GetPrefilterParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetPrefilterParamsWithContext creates a new GetPrefilterParams object
+// with the ability to set a context for a request.
+func NewGetPrefilterParamsWithContext(ctx context.Context) *GetPrefilterParams {
+ return &GetPrefilterParams{
+ Context: ctx,
+ }
+}
+
+// NewGetPrefilterParamsWithHTTPClient creates a new GetPrefilterParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetPrefilterParamsWithHTTPClient(client *http.Client) *GetPrefilterParams {
+ return &GetPrefilterParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetPrefilterParams contains all the parameters to send to the API endpoint
+
+ for the get prefilter operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetPrefilterParams struct {
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get prefilter params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetPrefilterParams) WithDefaults() *GetPrefilterParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get prefilter params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetPrefilterParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get prefilter params
+func (o *GetPrefilterParams) WithTimeout(timeout time.Duration) *GetPrefilterParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get prefilter params
+func (o *GetPrefilterParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get prefilter params
+func (o *GetPrefilterParams) WithContext(ctx context.Context) *GetPrefilterParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get prefilter params
+func (o *GetPrefilterParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get prefilter params
+func (o *GetPrefilterParams) WithHTTPClient(client *http.Client) *GetPrefilterParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get prefilter params
+func (o *GetPrefilterParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetPrefilterParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/prefilter/get_prefilter_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/prefilter/get_prefilter_responses.go
new file mode 100644
index 000000000..63e051e3a
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/prefilter/get_prefilter_responses.go
@@ -0,0 +1,168 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package prefilter
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// GetPrefilterReader is a Reader for the GetPrefilter structure.
+type GetPrefilterReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetPrefilterReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetPrefilterOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 500:
+ result := NewGetPrefilterFailure()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetPrefilterOK creates a GetPrefilterOK with default headers values
+func NewGetPrefilterOK() *GetPrefilterOK {
+ return &GetPrefilterOK{}
+}
+
+/*
+GetPrefilterOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetPrefilterOK struct {
+ Payload *models.Prefilter
+}
+
+// IsSuccess returns true when this get prefilter o k response has a 2xx status code
+func (o *GetPrefilterOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get prefilter o k response has a 3xx status code
+func (o *GetPrefilterOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get prefilter o k response has a 4xx status code
+func (o *GetPrefilterOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get prefilter o k response has a 5xx status code
+func (o *GetPrefilterOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get prefilter o k response a status code equal to that given
+func (o *GetPrefilterOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetPrefilterOK) Error() string {
+ return fmt.Sprintf("[GET /prefilter][%d] getPrefilterOK %+v", 200, o.Payload)
+}
+
+func (o *GetPrefilterOK) String() string {
+ return fmt.Sprintf("[GET /prefilter][%d] getPrefilterOK %+v", 200, o.Payload)
+}
+
+func (o *GetPrefilterOK) GetPayload() *models.Prefilter {
+ return o.Payload
+}
+
+func (o *GetPrefilterOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.Prefilter)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetPrefilterFailure creates a GetPrefilterFailure with default headers values
+func NewGetPrefilterFailure() *GetPrefilterFailure {
+ return &GetPrefilterFailure{}
+}
+
+/*
+GetPrefilterFailure describes a response with status code 500, with default header values.
+
+Prefilter get failed
+*/
+type GetPrefilterFailure struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this get prefilter failure response has a 2xx status code
+func (o *GetPrefilterFailure) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get prefilter failure response has a 3xx status code
+func (o *GetPrefilterFailure) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get prefilter failure response has a 4xx status code
+func (o *GetPrefilterFailure) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get prefilter failure response has a 5xx status code
+func (o *GetPrefilterFailure) IsServerError() bool {
+ return true
+}
+
+// IsCode returns true when this get prefilter failure response a status code equal to that given
+func (o *GetPrefilterFailure) IsCode(code int) bool {
+ return code == 500
+}
+
+func (o *GetPrefilterFailure) Error() string {
+ return fmt.Sprintf("[GET /prefilter][%d] getPrefilterFailure %+v", 500, o.Payload)
+}
+
+func (o *GetPrefilterFailure) String() string {
+ return fmt.Sprintf("[GET /prefilter][%d] getPrefilterFailure %+v", 500, o.Payload)
+}
+
+func (o *GetPrefilterFailure) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *GetPrefilterFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/prefilter/patch_prefilter_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/prefilter/patch_prefilter_parameters.go
new file mode 100644
index 000000000..14e6a56cf
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/prefilter/patch_prefilter_parameters.go
@@ -0,0 +1,156 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package prefilter
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// NewPatchPrefilterParams creates a new PatchPrefilterParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewPatchPrefilterParams() *PatchPrefilterParams {
+ return &PatchPrefilterParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewPatchPrefilterParamsWithTimeout creates a new PatchPrefilterParams object
+// with the ability to set a timeout on a request.
+func NewPatchPrefilterParamsWithTimeout(timeout time.Duration) *PatchPrefilterParams {
+ return &PatchPrefilterParams{
+ timeout: timeout,
+ }
+}
+
+// NewPatchPrefilterParamsWithContext creates a new PatchPrefilterParams object
+// with the ability to set a context for a request.
+func NewPatchPrefilterParamsWithContext(ctx context.Context) *PatchPrefilterParams {
+ return &PatchPrefilterParams{
+ Context: ctx,
+ }
+}
+
+// NewPatchPrefilterParamsWithHTTPClient creates a new PatchPrefilterParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewPatchPrefilterParamsWithHTTPClient(client *http.Client) *PatchPrefilterParams {
+ return &PatchPrefilterParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+PatchPrefilterParams contains all the parameters to send to the API endpoint
+
+ for the patch prefilter operation.
+
+ Typically these are written to a http.Request.
+*/
+type PatchPrefilterParams struct {
+
+ /* PrefilterSpec.
+
+ List of CIDR ranges for filter table
+ */
+ PrefilterSpec *models.PrefilterSpec
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the patch prefilter params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *PatchPrefilterParams) WithDefaults() *PatchPrefilterParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the patch prefilter params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *PatchPrefilterParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the patch prefilter params
+func (o *PatchPrefilterParams) WithTimeout(timeout time.Duration) *PatchPrefilterParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the patch prefilter params
+func (o *PatchPrefilterParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the patch prefilter params
+func (o *PatchPrefilterParams) WithContext(ctx context.Context) *PatchPrefilterParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the patch prefilter params
+func (o *PatchPrefilterParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the patch prefilter params
+func (o *PatchPrefilterParams) WithHTTPClient(client *http.Client) *PatchPrefilterParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the patch prefilter params
+func (o *PatchPrefilterParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithPrefilterSpec adds the prefilterSpec to the patch prefilter params
+func (o *PatchPrefilterParams) WithPrefilterSpec(prefilterSpec *models.PrefilterSpec) *PatchPrefilterParams {
+ o.SetPrefilterSpec(prefilterSpec)
+ return o
+}
+
+// SetPrefilterSpec adds the prefilterSpec to the patch prefilter params
+func (o *PatchPrefilterParams) SetPrefilterSpec(prefilterSpec *models.PrefilterSpec) {
+ o.PrefilterSpec = prefilterSpec
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *PatchPrefilterParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if o.PrefilterSpec != nil {
+ if err := r.SetBodyParam(o.PrefilterSpec); err != nil {
+ return err
+ }
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/prefilter/patch_prefilter_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/prefilter/patch_prefilter_responses.go
new file mode 100644
index 000000000..56db490a6
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/prefilter/patch_prefilter_responses.go
@@ -0,0 +1,292 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package prefilter
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// PatchPrefilterReader is a Reader for the PatchPrefilter structure.
+type PatchPrefilterReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *PatchPrefilterReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewPatchPrefilterOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 403:
+ result := NewPatchPrefilterForbidden()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 461:
+ result := NewPatchPrefilterInvalidCIDR()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 500:
+ result := NewPatchPrefilterFailure()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewPatchPrefilterOK creates a PatchPrefilterOK with default headers values
+func NewPatchPrefilterOK() *PatchPrefilterOK {
+ return &PatchPrefilterOK{}
+}
+
+/*
+PatchPrefilterOK describes a response with status code 200, with default header values.
+
+Updated
+*/
+type PatchPrefilterOK struct {
+ Payload *models.Prefilter
+}
+
+// IsSuccess returns true when this patch prefilter o k response has a 2xx status code
+func (o *PatchPrefilterOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this patch prefilter o k response has a 3xx status code
+func (o *PatchPrefilterOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this patch prefilter o k response has a 4xx status code
+func (o *PatchPrefilterOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this patch prefilter o k response has a 5xx status code
+func (o *PatchPrefilterOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this patch prefilter o k response a status code equal to that given
+func (o *PatchPrefilterOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *PatchPrefilterOK) Error() string {
+ return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterOK %+v", 200, o.Payload)
+}
+
+func (o *PatchPrefilterOK) String() string {
+ return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterOK %+v", 200, o.Payload)
+}
+
+func (o *PatchPrefilterOK) GetPayload() *models.Prefilter {
+ return o.Payload
+}
+
+func (o *PatchPrefilterOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.Prefilter)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewPatchPrefilterForbidden creates a PatchPrefilterForbidden with default headers values
+func NewPatchPrefilterForbidden() *PatchPrefilterForbidden {
+ return &PatchPrefilterForbidden{}
+}
+
+/*
+PatchPrefilterForbidden describes a response with status code 403, with default header values.
+
+Forbidden
+*/
+type PatchPrefilterForbidden struct {
+}
+
+// IsSuccess returns true when this patch prefilter forbidden response has a 2xx status code
+func (o *PatchPrefilterForbidden) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this patch prefilter forbidden response has a 3xx status code
+func (o *PatchPrefilterForbidden) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this patch prefilter forbidden response has a 4xx status code
+func (o *PatchPrefilterForbidden) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this patch prefilter forbidden response has a 5xx status code
+func (o *PatchPrefilterForbidden) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this patch prefilter forbidden response a status code equal to that given
+func (o *PatchPrefilterForbidden) IsCode(code int) bool {
+ return code == 403
+}
+
+func (o *PatchPrefilterForbidden) Error() string {
+ return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterForbidden ", 403)
+}
+
+func (o *PatchPrefilterForbidden) String() string {
+ return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterForbidden ", 403)
+}
+
+func (o *PatchPrefilterForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewPatchPrefilterInvalidCIDR creates a PatchPrefilterInvalidCIDR with default headers values
+func NewPatchPrefilterInvalidCIDR() *PatchPrefilterInvalidCIDR {
+ return &PatchPrefilterInvalidCIDR{}
+}
+
+/*
+PatchPrefilterInvalidCIDR describes a response with status code 461, with default header values.
+
+Invalid CIDR prefix
+*/
+type PatchPrefilterInvalidCIDR struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this patch prefilter invalid c Id r response has a 2xx status code
+func (o *PatchPrefilterInvalidCIDR) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this patch prefilter invalid c Id r response has a 3xx status code
+func (o *PatchPrefilterInvalidCIDR) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this patch prefilter invalid c Id r response has a 4xx status code
+func (o *PatchPrefilterInvalidCIDR) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this patch prefilter invalid c Id r response has a 5xx status code
+func (o *PatchPrefilterInvalidCIDR) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this patch prefilter invalid c Id r response a status code equal to that given
+func (o *PatchPrefilterInvalidCIDR) IsCode(code int) bool {
+ return code == 461
+}
+
+func (o *PatchPrefilterInvalidCIDR) Error() string {
+ return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterInvalidCIdR %+v", 461, o.Payload)
+}
+
+func (o *PatchPrefilterInvalidCIDR) String() string {
+ return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterInvalidCIdR %+v", 461, o.Payload)
+}
+
+func (o *PatchPrefilterInvalidCIDR) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *PatchPrefilterInvalidCIDR) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewPatchPrefilterFailure creates a PatchPrefilterFailure with default headers values
+func NewPatchPrefilterFailure() *PatchPrefilterFailure {
+ return &PatchPrefilterFailure{}
+}
+
+/*
+PatchPrefilterFailure describes a response with status code 500, with default header values.
+
+Prefilter update failed
+*/
+type PatchPrefilterFailure struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this patch prefilter failure response has a 2xx status code
+func (o *PatchPrefilterFailure) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this patch prefilter failure response has a 3xx status code
+func (o *PatchPrefilterFailure) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this patch prefilter failure response has a 4xx status code
+func (o *PatchPrefilterFailure) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this patch prefilter failure response has a 5xx status code
+func (o *PatchPrefilterFailure) IsServerError() bool {
+ return true
+}
+
+// IsCode returns true when this patch prefilter failure response a status code equal to that given
+func (o *PatchPrefilterFailure) IsCode(code int) bool {
+ return code == 500
+}
+
+func (o *PatchPrefilterFailure) Error() string {
+ return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterFailure %+v", 500, o.Payload)
+}
+
+func (o *PatchPrefilterFailure) String() string {
+ return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterFailure %+v", 500, o.Payload)
+}
+
+func (o *PatchPrefilterFailure) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *PatchPrefilterFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/prefilter/prefilter_client.go b/vendor/github.com/cilium/cilium/api/v1/client/prefilter/prefilter_client.go
new file mode 100644
index 000000000..c577a739a
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/prefilter/prefilter_client.go
@@ -0,0 +1,162 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package prefilter
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+)
+
+// New creates a new prefilter API client.
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
+ return &Client{transport: transport, formats: formats}
+}
+
+/*
+Client for prefilter API
+*/
+type Client struct {
+ transport runtime.ClientTransport
+ formats strfmt.Registry
+}
+
+// ClientOption is the option for Client methods
+type ClientOption func(*runtime.ClientOperation)
+
+// ClientService is the interface for Client methods
+type ClientService interface {
+ DeletePrefilter(params *DeletePrefilterParams, opts ...ClientOption) (*DeletePrefilterOK, error)
+
+ GetPrefilter(params *GetPrefilterParams, opts ...ClientOption) (*GetPrefilterOK, error)
+
+ PatchPrefilter(params *PatchPrefilterParams, opts ...ClientOption) (*PatchPrefilterOK, error)
+
+ SetTransport(transport runtime.ClientTransport)
+}
+
+/*
+DeletePrefilter deletes list of c ID rs
+*/
+func (a *Client) DeletePrefilter(params *DeletePrefilterParams, opts ...ClientOption) (*DeletePrefilterOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewDeletePrefilterParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "DeletePrefilter",
+ Method: "DELETE",
+ PathPattern: "/prefilter",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &DeletePrefilterReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*DeletePrefilterOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for DeletePrefilter: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+GetPrefilter retrieves list of c ID rs
+*/
+func (a *Client) GetPrefilter(params *GetPrefilterParams, opts ...ClientOption) (*GetPrefilterOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetPrefilterParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetPrefilter",
+ Method: "GET",
+ PathPattern: "/prefilter",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetPrefilterReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetPrefilterOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetPrefilter: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+PatchPrefilter updates list of c ID rs
+*/
+func (a *Client) PatchPrefilter(params *PatchPrefilterParams, opts ...ClientOption) (*PatchPrefilterOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewPatchPrefilterParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "PatchPrefilter",
+ Method: "PATCH",
+ PathPattern: "/prefilter",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &PatchPrefilterReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*PatchPrefilterOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for PatchPrefilter: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+ a.transport = transport
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/recorder/delete_recorder_id_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/recorder/delete_recorder_id_parameters.go
new file mode 100644
index 000000000..6a9a3f362
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/recorder/delete_recorder_id_parameters.go
@@ -0,0 +1,155 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package recorder
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// NewDeleteRecorderIDParams creates a new DeleteRecorderIDParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewDeleteRecorderIDParams() *DeleteRecorderIDParams {
+ return &DeleteRecorderIDParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewDeleteRecorderIDParamsWithTimeout creates a new DeleteRecorderIDParams object
+// with the ability to set a timeout on a request.
+func NewDeleteRecorderIDParamsWithTimeout(timeout time.Duration) *DeleteRecorderIDParams {
+ return &DeleteRecorderIDParams{
+ timeout: timeout,
+ }
+}
+
+// NewDeleteRecorderIDParamsWithContext creates a new DeleteRecorderIDParams object
+// with the ability to set a context for a request.
+func NewDeleteRecorderIDParamsWithContext(ctx context.Context) *DeleteRecorderIDParams {
+ return &DeleteRecorderIDParams{
+ Context: ctx,
+ }
+}
+
+// NewDeleteRecorderIDParamsWithHTTPClient creates a new DeleteRecorderIDParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewDeleteRecorderIDParamsWithHTTPClient(client *http.Client) *DeleteRecorderIDParams {
+ return &DeleteRecorderIDParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+DeleteRecorderIDParams contains all the parameters to send to the API endpoint
+
+ for the delete recorder ID operation.
+
+ Typically these are written to a http.Request.
+*/
+type DeleteRecorderIDParams struct {
+
+ /* ID.
+
+ ID of recorder
+ */
+ ID int64
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the delete recorder ID params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteRecorderIDParams) WithDefaults() *DeleteRecorderIDParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the delete recorder ID params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteRecorderIDParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the delete recorder ID params
+func (o *DeleteRecorderIDParams) WithTimeout(timeout time.Duration) *DeleteRecorderIDParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the delete recorder ID params
+func (o *DeleteRecorderIDParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the delete recorder ID params
+func (o *DeleteRecorderIDParams) WithContext(ctx context.Context) *DeleteRecorderIDParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the delete recorder ID params
+func (o *DeleteRecorderIDParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the delete recorder ID params
+func (o *DeleteRecorderIDParams) WithHTTPClient(client *http.Client) *DeleteRecorderIDParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the delete recorder ID params
+func (o *DeleteRecorderIDParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithID adds the id to the delete recorder ID params
+func (o *DeleteRecorderIDParams) WithID(id int64) *DeleteRecorderIDParams {
+ o.SetID(id)
+ return o
+}
+
+// SetID adds the id to the delete recorder ID params
+func (o *DeleteRecorderIDParams) SetID(id int64) {
+ o.ID = id
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *DeleteRecorderIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param id
+ if err := r.SetPathParam("id", swag.FormatInt64(o.ID)); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/recorder/delete_recorder_id_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/recorder/delete_recorder_id_responses.go
new file mode 100644
index 000000000..cf7ab77a2
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/recorder/delete_recorder_id_responses.go
@@ -0,0 +1,270 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package recorder
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// DeleteRecorderIDReader is a Reader for the DeleteRecorderID structure.
+type DeleteRecorderIDReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *DeleteRecorderIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewDeleteRecorderIDOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 403:
+ result := NewDeleteRecorderIDForbidden()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 404:
+ result := NewDeleteRecorderIDNotFound()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 500:
+ result := NewDeleteRecorderIDFailure()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewDeleteRecorderIDOK creates a DeleteRecorderIDOK with default headers values
+func NewDeleteRecorderIDOK() *DeleteRecorderIDOK {
+ return &DeleteRecorderIDOK{}
+}
+
+/*
+DeleteRecorderIDOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type DeleteRecorderIDOK struct {
+}
+
+// IsSuccess returns true when this delete recorder Id o k response has a 2xx status code
+func (o *DeleteRecorderIDOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this delete recorder Id o k response has a 3xx status code
+func (o *DeleteRecorderIDOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete recorder Id o k response has a 4xx status code
+func (o *DeleteRecorderIDOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this delete recorder Id o k response has a 5xx status code
+func (o *DeleteRecorderIDOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this delete recorder Id o k response a status code equal to that given
+func (o *DeleteRecorderIDOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *DeleteRecorderIDOK) Error() string {
+ return fmt.Sprintf("[DELETE /recorder/{id}][%d] deleteRecorderIdOK ", 200)
+}
+
+func (o *DeleteRecorderIDOK) String() string {
+ return fmt.Sprintf("[DELETE /recorder/{id}][%d] deleteRecorderIdOK ", 200)
+}
+
+func (o *DeleteRecorderIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewDeleteRecorderIDForbidden creates a DeleteRecorderIDForbidden with default headers values
+func NewDeleteRecorderIDForbidden() *DeleteRecorderIDForbidden {
+ return &DeleteRecorderIDForbidden{}
+}
+
+/*
+DeleteRecorderIDForbidden describes a response with status code 403, with default header values.
+
+Forbidden
+*/
+type DeleteRecorderIDForbidden struct {
+}
+
+// IsSuccess returns true when this delete recorder Id forbidden response has a 2xx status code
+func (o *DeleteRecorderIDForbidden) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this delete recorder Id forbidden response has a 3xx status code
+func (o *DeleteRecorderIDForbidden) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete recorder Id forbidden response has a 4xx status code
+func (o *DeleteRecorderIDForbidden) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this delete recorder Id forbidden response has a 5xx status code
+func (o *DeleteRecorderIDForbidden) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this delete recorder Id forbidden response a status code equal to that given
+func (o *DeleteRecorderIDForbidden) IsCode(code int) bool {
+ return code == 403
+}
+
+func (o *DeleteRecorderIDForbidden) Error() string {
+ return fmt.Sprintf("[DELETE /recorder/{id}][%d] deleteRecorderIdForbidden ", 403)
+}
+
+func (o *DeleteRecorderIDForbidden) String() string {
+ return fmt.Sprintf("[DELETE /recorder/{id}][%d] deleteRecorderIdForbidden ", 403)
+}
+
+func (o *DeleteRecorderIDForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewDeleteRecorderIDNotFound creates a DeleteRecorderIDNotFound with default headers values
+func NewDeleteRecorderIDNotFound() *DeleteRecorderIDNotFound {
+ return &DeleteRecorderIDNotFound{}
+}
+
+/*
+DeleteRecorderIDNotFound describes a response with status code 404, with default header values.
+
+Recorder not found
+*/
+type DeleteRecorderIDNotFound struct {
+}
+
+// IsSuccess returns true when this delete recorder Id not found response has a 2xx status code
+func (o *DeleteRecorderIDNotFound) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this delete recorder Id not found response has a 3xx status code
+func (o *DeleteRecorderIDNotFound) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete recorder Id not found response has a 4xx status code
+func (o *DeleteRecorderIDNotFound) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this delete recorder Id not found response has a 5xx status code
+func (o *DeleteRecorderIDNotFound) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this delete recorder Id not found response a status code equal to that given
+func (o *DeleteRecorderIDNotFound) IsCode(code int) bool {
+ return code == 404
+}
+
+func (o *DeleteRecorderIDNotFound) Error() string {
+ return fmt.Sprintf("[DELETE /recorder/{id}][%d] deleteRecorderIdNotFound ", 404)
+}
+
+func (o *DeleteRecorderIDNotFound) String() string {
+ return fmt.Sprintf("[DELETE /recorder/{id}][%d] deleteRecorderIdNotFound ", 404)
+}
+
+func (o *DeleteRecorderIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewDeleteRecorderIDFailure creates a DeleteRecorderIDFailure with default headers values
+func NewDeleteRecorderIDFailure() *DeleteRecorderIDFailure {
+ return &DeleteRecorderIDFailure{}
+}
+
+/*
+DeleteRecorderIDFailure describes a response with status code 500, with default header values.
+
+Recorder deletion failed
+*/
+type DeleteRecorderIDFailure struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this delete recorder Id failure response has a 2xx status code
+func (o *DeleteRecorderIDFailure) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this delete recorder Id failure response has a 3xx status code
+func (o *DeleteRecorderIDFailure) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete recorder Id failure response has a 4xx status code
+func (o *DeleteRecorderIDFailure) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this delete recorder Id failure response has a 5xx status code
+func (o *DeleteRecorderIDFailure) IsServerError() bool {
+ return true
+}
+
+// IsCode returns true when this delete recorder Id failure response a status code equal to that given
+func (o *DeleteRecorderIDFailure) IsCode(code int) bool {
+ return code == 500
+}
+
+func (o *DeleteRecorderIDFailure) Error() string {
+ return fmt.Sprintf("[DELETE /recorder/{id}][%d] deleteRecorderIdFailure %+v", 500, o.Payload)
+}
+
+func (o *DeleteRecorderIDFailure) String() string {
+ return fmt.Sprintf("[DELETE /recorder/{id}][%d] deleteRecorderIdFailure %+v", 500, o.Payload)
+}
+
+func (o *DeleteRecorderIDFailure) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *DeleteRecorderIDFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_id_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_id_parameters.go
new file mode 100644
index 000000000..4aae52c7d
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_id_parameters.go
@@ -0,0 +1,155 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package recorder
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// NewGetRecorderIDParams creates a new GetRecorderIDParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetRecorderIDParams() *GetRecorderIDParams {
+ return &GetRecorderIDParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetRecorderIDParamsWithTimeout creates a new GetRecorderIDParams object
+// with the ability to set a timeout on a request.
+func NewGetRecorderIDParamsWithTimeout(timeout time.Duration) *GetRecorderIDParams {
+ return &GetRecorderIDParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetRecorderIDParamsWithContext creates a new GetRecorderIDParams object
+// with the ability to set a context for a request.
+func NewGetRecorderIDParamsWithContext(ctx context.Context) *GetRecorderIDParams {
+ return &GetRecorderIDParams{
+ Context: ctx,
+ }
+}
+
+// NewGetRecorderIDParamsWithHTTPClient creates a new GetRecorderIDParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetRecorderIDParamsWithHTTPClient(client *http.Client) *GetRecorderIDParams {
+ return &GetRecorderIDParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetRecorderIDParams contains all the parameters to send to the API endpoint
+
+ for the get recorder ID operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetRecorderIDParams struct {
+
+ /* ID.
+
+ ID of recorder
+ */
+ ID int64
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get recorder ID params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetRecorderIDParams) WithDefaults() *GetRecorderIDParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get recorder ID params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetRecorderIDParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get recorder ID params
+func (o *GetRecorderIDParams) WithTimeout(timeout time.Duration) *GetRecorderIDParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get recorder ID params
+func (o *GetRecorderIDParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get recorder ID params
+func (o *GetRecorderIDParams) WithContext(ctx context.Context) *GetRecorderIDParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get recorder ID params
+func (o *GetRecorderIDParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get recorder ID params
+func (o *GetRecorderIDParams) WithHTTPClient(client *http.Client) *GetRecorderIDParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get recorder ID params
+func (o *GetRecorderIDParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithID adds the id to the get recorder ID params
+func (o *GetRecorderIDParams) WithID(id int64) *GetRecorderIDParams {
+ o.SetID(id)
+ return o
+}
+
+// SetID adds the id to the get recorder ID params
+func (o *GetRecorderIDParams) SetID(id int64) {
+ o.ID = id
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetRecorderIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param id
+ if err := r.SetPathParam("id", swag.FormatInt64(o.ID)); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_id_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_id_responses.go
new file mode 100644
index 000000000..07f4e60cd
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_id_responses.go
@@ -0,0 +1,158 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package recorder
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// GetRecorderIDReader is a Reader for the GetRecorderID structure.
+type GetRecorderIDReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetRecorderIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetRecorderIDOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 404:
+ result := NewGetRecorderIDNotFound()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetRecorderIDOK creates a GetRecorderIDOK with default headers values
+func NewGetRecorderIDOK() *GetRecorderIDOK {
+ return &GetRecorderIDOK{}
+}
+
+/*
+GetRecorderIDOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetRecorderIDOK struct {
+ Payload *models.Recorder
+}
+
+// IsSuccess returns true when this get recorder Id o k response has a 2xx status code
+func (o *GetRecorderIDOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get recorder Id o k response has a 3xx status code
+func (o *GetRecorderIDOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get recorder Id o k response has a 4xx status code
+func (o *GetRecorderIDOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get recorder Id o k response has a 5xx status code
+func (o *GetRecorderIDOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get recorder Id o k response a status code equal to that given
+func (o *GetRecorderIDOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetRecorderIDOK) Error() string {
+ return fmt.Sprintf("[GET /recorder/{id}][%d] getRecorderIdOK %+v", 200, o.Payload)
+}
+
+func (o *GetRecorderIDOK) String() string {
+ return fmt.Sprintf("[GET /recorder/{id}][%d] getRecorderIdOK %+v", 200, o.Payload)
+}
+
+func (o *GetRecorderIDOK) GetPayload() *models.Recorder {
+ return o.Payload
+}
+
+func (o *GetRecorderIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.Recorder)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetRecorderIDNotFound creates a GetRecorderIDNotFound with default headers values
+func NewGetRecorderIDNotFound() *GetRecorderIDNotFound {
+ return &GetRecorderIDNotFound{}
+}
+
+/*
+GetRecorderIDNotFound describes a response with status code 404, with default header values.
+
+Recorder not found
+*/
+type GetRecorderIDNotFound struct {
+}
+
+// IsSuccess returns true when this get recorder Id not found response has a 2xx status code
+func (o *GetRecorderIDNotFound) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get recorder Id not found response has a 3xx status code
+func (o *GetRecorderIDNotFound) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get recorder Id not found response has a 4xx status code
+func (o *GetRecorderIDNotFound) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get recorder Id not found response has a 5xx status code
+func (o *GetRecorderIDNotFound) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get recorder Id not found response a status code equal to that given
+func (o *GetRecorderIDNotFound) IsCode(code int) bool {
+ return code == 404
+}
+
+func (o *GetRecorderIDNotFound) Error() string {
+ return fmt.Sprintf("[GET /recorder/{id}][%d] getRecorderIdNotFound ", 404)
+}
+
+func (o *GetRecorderIDNotFound) String() string {
+ return fmt.Sprintf("[GET /recorder/{id}][%d] getRecorderIdNotFound ", 404)
+}
+
+func (o *GetRecorderIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_masks_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_masks_parameters.go
new file mode 100644
index 000000000..4dd487344
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_masks_parameters.go
@@ -0,0 +1,131 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package recorder
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetRecorderMasksParams creates a new GetRecorderMasksParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetRecorderMasksParams() *GetRecorderMasksParams {
+ return &GetRecorderMasksParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetRecorderMasksParamsWithTimeout creates a new GetRecorderMasksParams object
+// with the ability to set a timeout on a request.
+func NewGetRecorderMasksParamsWithTimeout(timeout time.Duration) *GetRecorderMasksParams {
+ return &GetRecorderMasksParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetRecorderMasksParamsWithContext creates a new GetRecorderMasksParams object
+// with the ability to set a context for a request.
+func NewGetRecorderMasksParamsWithContext(ctx context.Context) *GetRecorderMasksParams {
+ return &GetRecorderMasksParams{
+ Context: ctx,
+ }
+}
+
+// NewGetRecorderMasksParamsWithHTTPClient creates a new GetRecorderMasksParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetRecorderMasksParamsWithHTTPClient(client *http.Client) *GetRecorderMasksParams {
+ return &GetRecorderMasksParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetRecorderMasksParams contains all the parameters to send to the API endpoint
+
+ for the get recorder masks operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetRecorderMasksParams struct {
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get recorder masks params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetRecorderMasksParams) WithDefaults() *GetRecorderMasksParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get recorder masks params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetRecorderMasksParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get recorder masks params
+func (o *GetRecorderMasksParams) WithTimeout(timeout time.Duration) *GetRecorderMasksParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get recorder masks params
+func (o *GetRecorderMasksParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get recorder masks params
+func (o *GetRecorderMasksParams) WithContext(ctx context.Context) *GetRecorderMasksParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get recorder masks params
+func (o *GetRecorderMasksParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get recorder masks params
+func (o *GetRecorderMasksParams) WithHTTPClient(client *http.Client) *GetRecorderMasksParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get recorder masks params
+func (o *GetRecorderMasksParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetRecorderMasksParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_masks_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_masks_responses.go
new file mode 100644
index 000000000..06b855596
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_masks_responses.go
@@ -0,0 +1,99 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package recorder
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// GetRecorderMasksReader is a Reader for the GetRecorderMasks structure.
+type GetRecorderMasksReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetRecorderMasksReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetRecorderMasksOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetRecorderMasksOK creates a GetRecorderMasksOK with default headers values
+func NewGetRecorderMasksOK() *GetRecorderMasksOK {
+ return &GetRecorderMasksOK{}
+}
+
+/*
+GetRecorderMasksOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetRecorderMasksOK struct {
+ Payload []*models.RecorderMask
+}
+
+// IsSuccess returns true when this get recorder masks o k response has a 2xx status code
+func (o *GetRecorderMasksOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get recorder masks o k response has a 3xx status code
+func (o *GetRecorderMasksOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get recorder masks o k response has a 4xx status code
+func (o *GetRecorderMasksOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get recorder masks o k response has a 5xx status code
+func (o *GetRecorderMasksOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get recorder masks o k response a status code equal to that given
+func (o *GetRecorderMasksOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetRecorderMasksOK) Error() string {
+ return fmt.Sprintf("[GET /recorder/masks][%d] getRecorderMasksOK %+v", 200, o.Payload)
+}
+
+func (o *GetRecorderMasksOK) String() string {
+ return fmt.Sprintf("[GET /recorder/masks][%d] getRecorderMasksOK %+v", 200, o.Payload)
+}
+
+func (o *GetRecorderMasksOK) GetPayload() []*models.RecorderMask {
+ return o.Payload
+}
+
+func (o *GetRecorderMasksOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_parameters.go
new file mode 100644
index 000000000..d7ff113b2
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_parameters.go
@@ -0,0 +1,131 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package recorder
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetRecorderParams creates a new GetRecorderParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetRecorderParams() *GetRecorderParams {
+ return &GetRecorderParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetRecorderParamsWithTimeout creates a new GetRecorderParams object
+// with the ability to set a timeout on a request.
+func NewGetRecorderParamsWithTimeout(timeout time.Duration) *GetRecorderParams {
+ return &GetRecorderParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetRecorderParamsWithContext creates a new GetRecorderParams object
+// with the ability to set a context for a request.
+func NewGetRecorderParamsWithContext(ctx context.Context) *GetRecorderParams {
+ return &GetRecorderParams{
+ Context: ctx,
+ }
+}
+
+// NewGetRecorderParamsWithHTTPClient creates a new GetRecorderParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetRecorderParamsWithHTTPClient(client *http.Client) *GetRecorderParams {
+ return &GetRecorderParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetRecorderParams contains all the parameters to send to the API endpoint
+
+ for the get recorder operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetRecorderParams struct {
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get recorder params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetRecorderParams) WithDefaults() *GetRecorderParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get recorder params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetRecorderParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get recorder params
+func (o *GetRecorderParams) WithTimeout(timeout time.Duration) *GetRecorderParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get recorder params
+func (o *GetRecorderParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get recorder params
+func (o *GetRecorderParams) WithContext(ctx context.Context) *GetRecorderParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get recorder params
+func (o *GetRecorderParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get recorder params
+func (o *GetRecorderParams) WithHTTPClient(client *http.Client) *GetRecorderParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get recorder params
+func (o *GetRecorderParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetRecorderParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_responses.go
new file mode 100644
index 000000000..f45762c30
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_responses.go
@@ -0,0 +1,99 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package recorder
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// GetRecorderReader is a Reader for the GetRecorder structure.
+type GetRecorderReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetRecorderReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetRecorderOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetRecorderOK creates a GetRecorderOK with default headers values
+func NewGetRecorderOK() *GetRecorderOK {
+ return &GetRecorderOK{}
+}
+
+/*
+GetRecorderOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetRecorderOK struct {
+ Payload []*models.Recorder
+}
+
+// IsSuccess returns true when this get recorder o k response has a 2xx status code
+func (o *GetRecorderOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get recorder o k response has a 3xx status code
+func (o *GetRecorderOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get recorder o k response has a 4xx status code
+func (o *GetRecorderOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get recorder o k response has a 5xx status code
+func (o *GetRecorderOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get recorder o k response a status code equal to that given
+func (o *GetRecorderOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetRecorderOK) Error() string {
+ return fmt.Sprintf("[GET /recorder][%d] getRecorderOK %+v", 200, o.Payload)
+}
+
+func (o *GetRecorderOK) String() string {
+ return fmt.Sprintf("[GET /recorder][%d] getRecorderOK %+v", 200, o.Payload)
+}
+
+func (o *GetRecorderOK) GetPayload() []*models.Recorder {
+ return o.Payload
+}
+
+func (o *GetRecorderOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/recorder/put_recorder_id_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/recorder/put_recorder_id_parameters.go
new file mode 100644
index 000000000..3515d3c76
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/recorder/put_recorder_id_parameters.go
@@ -0,0 +1,179 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package recorder
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// NewPutRecorderIDParams creates a new PutRecorderIDParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewPutRecorderIDParams() *PutRecorderIDParams {
+ return &PutRecorderIDParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewPutRecorderIDParamsWithTimeout creates a new PutRecorderIDParams object
+// with the ability to set a timeout on a request.
+func NewPutRecorderIDParamsWithTimeout(timeout time.Duration) *PutRecorderIDParams {
+ return &PutRecorderIDParams{
+ timeout: timeout,
+ }
+}
+
+// NewPutRecorderIDParamsWithContext creates a new PutRecorderIDParams object
+// with the ability to set a context for a request.
+func NewPutRecorderIDParamsWithContext(ctx context.Context) *PutRecorderIDParams {
+ return &PutRecorderIDParams{
+ Context: ctx,
+ }
+}
+
+// NewPutRecorderIDParamsWithHTTPClient creates a new PutRecorderIDParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewPutRecorderIDParamsWithHTTPClient(client *http.Client) *PutRecorderIDParams {
+ return &PutRecorderIDParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+PutRecorderIDParams contains all the parameters to send to the API endpoint
+
+ for the put recorder ID operation.
+
+ Typically these are written to a http.Request.
+*/
+type PutRecorderIDParams struct {
+
+ /* Config.
+
+ Recorder configuration
+ */
+ Config *models.RecorderSpec
+
+ /* ID.
+
+ ID of recorder
+ */
+ ID int64
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the put recorder ID params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *PutRecorderIDParams) WithDefaults() *PutRecorderIDParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the put recorder ID params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *PutRecorderIDParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the put recorder ID params
+func (o *PutRecorderIDParams) WithTimeout(timeout time.Duration) *PutRecorderIDParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the put recorder ID params
+func (o *PutRecorderIDParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the put recorder ID params
+func (o *PutRecorderIDParams) WithContext(ctx context.Context) *PutRecorderIDParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the put recorder ID params
+func (o *PutRecorderIDParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the put recorder ID params
+func (o *PutRecorderIDParams) WithHTTPClient(client *http.Client) *PutRecorderIDParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the put recorder ID params
+func (o *PutRecorderIDParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithConfig adds the config to the put recorder ID params
+func (o *PutRecorderIDParams) WithConfig(config *models.RecorderSpec) *PutRecorderIDParams {
+ o.SetConfig(config)
+ return o
+}
+
+// SetConfig adds the config to the put recorder ID params
+func (o *PutRecorderIDParams) SetConfig(config *models.RecorderSpec) {
+ o.Config = config
+}
+
+// WithID adds the id to the put recorder ID params
+func (o *PutRecorderIDParams) WithID(id int64) *PutRecorderIDParams {
+ o.SetID(id)
+ return o
+}
+
+// SetID adds the id to the put recorder ID params
+func (o *PutRecorderIDParams) SetID(id int64) {
+ o.ID = id
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *PutRecorderIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if o.Config != nil {
+ if err := r.SetBodyParam(o.Config); err != nil {
+ return err
+ }
+ }
+
+ // path param id
+ if err := r.SetPathParam("id", swag.FormatInt64(o.ID)); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/recorder/put_recorder_id_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/recorder/put_recorder_id_responses.go
new file mode 100644
index 000000000..996c87026
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/recorder/put_recorder_id_responses.go
@@ -0,0 +1,270 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package recorder
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// PutRecorderIDReader is a Reader for the PutRecorderID structure.
+type PutRecorderIDReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *PutRecorderIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewPutRecorderIDOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 201:
+ result := NewPutRecorderIDCreated()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 403:
+ result := NewPutRecorderIDForbidden()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 500:
+ result := NewPutRecorderIDFailure()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewPutRecorderIDOK creates a PutRecorderIDOK with default headers values
+func NewPutRecorderIDOK() *PutRecorderIDOK {
+ return &PutRecorderIDOK{}
+}
+
+/*
+PutRecorderIDOK describes a response with status code 200, with default header values.
+
+Updated
+*/
+type PutRecorderIDOK struct {
+}
+
+// IsSuccess returns true when this put recorder Id o k response has a 2xx status code
+func (o *PutRecorderIDOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this put recorder Id o k response has a 3xx status code
+func (o *PutRecorderIDOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this put recorder Id o k response has a 4xx status code
+func (o *PutRecorderIDOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this put recorder Id o k response has a 5xx status code
+func (o *PutRecorderIDOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this put recorder Id o k response a status code equal to that given
+func (o *PutRecorderIDOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *PutRecorderIDOK) Error() string {
+ return fmt.Sprintf("[PUT /recorder/{id}][%d] putRecorderIdOK ", 200)
+}
+
+func (o *PutRecorderIDOK) String() string {
+ return fmt.Sprintf("[PUT /recorder/{id}][%d] putRecorderIdOK ", 200)
+}
+
+func (o *PutRecorderIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewPutRecorderIDCreated creates a PutRecorderIDCreated with default headers values
+func NewPutRecorderIDCreated() *PutRecorderIDCreated {
+ return &PutRecorderIDCreated{}
+}
+
+/*
+PutRecorderIDCreated describes a response with status code 201, with default header values.
+
+Created
+*/
+type PutRecorderIDCreated struct {
+}
+
+// IsSuccess returns true when this put recorder Id created response has a 2xx status code
+func (o *PutRecorderIDCreated) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this put recorder Id created response has a 3xx status code
+func (o *PutRecorderIDCreated) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this put recorder Id created response has a 4xx status code
+func (o *PutRecorderIDCreated) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this put recorder Id created response has a 5xx status code
+func (o *PutRecorderIDCreated) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this put recorder Id created response a status code equal to that given
+func (o *PutRecorderIDCreated) IsCode(code int) bool {
+ return code == 201
+}
+
+func (o *PutRecorderIDCreated) Error() string {
+ return fmt.Sprintf("[PUT /recorder/{id}][%d] putRecorderIdCreated ", 201)
+}
+
+func (o *PutRecorderIDCreated) String() string {
+ return fmt.Sprintf("[PUT /recorder/{id}][%d] putRecorderIdCreated ", 201)
+}
+
+func (o *PutRecorderIDCreated) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewPutRecorderIDForbidden creates a PutRecorderIDForbidden with default headers values
+func NewPutRecorderIDForbidden() *PutRecorderIDForbidden {
+ return &PutRecorderIDForbidden{}
+}
+
+/*
+PutRecorderIDForbidden describes a response with status code 403, with default header values.
+
+Forbidden
+*/
+type PutRecorderIDForbidden struct {
+}
+
+// IsSuccess returns true when this put recorder Id forbidden response has a 2xx status code
+func (o *PutRecorderIDForbidden) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this put recorder Id forbidden response has a 3xx status code
+func (o *PutRecorderIDForbidden) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this put recorder Id forbidden response has a 4xx status code
+func (o *PutRecorderIDForbidden) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this put recorder Id forbidden response has a 5xx status code
+func (o *PutRecorderIDForbidden) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this put recorder Id forbidden response a status code equal to that given
+func (o *PutRecorderIDForbidden) IsCode(code int) bool {
+ return code == 403
+}
+
+func (o *PutRecorderIDForbidden) Error() string {
+ return fmt.Sprintf("[PUT /recorder/{id}][%d] putRecorderIdForbidden ", 403)
+}
+
+func (o *PutRecorderIDForbidden) String() string {
+ return fmt.Sprintf("[PUT /recorder/{id}][%d] putRecorderIdForbidden ", 403)
+}
+
+func (o *PutRecorderIDForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewPutRecorderIDFailure creates a PutRecorderIDFailure with default headers values
+func NewPutRecorderIDFailure() *PutRecorderIDFailure {
+ return &PutRecorderIDFailure{}
+}
+
+/*
+PutRecorderIDFailure describes a response with status code 500, with default header values.
+
+Error while creating recorder
+*/
+type PutRecorderIDFailure struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this put recorder Id failure response has a 2xx status code
+func (o *PutRecorderIDFailure) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this put recorder Id failure response has a 3xx status code
+func (o *PutRecorderIDFailure) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this put recorder Id failure response has a 4xx status code
+func (o *PutRecorderIDFailure) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this put recorder Id failure response has a 5xx status code
+func (o *PutRecorderIDFailure) IsServerError() bool {
+ return true
+}
+
+// IsCode returns true when this put recorder Id failure response a status code equal to that given
+func (o *PutRecorderIDFailure) IsCode(code int) bool {
+ return code == 500
+}
+
+func (o *PutRecorderIDFailure) Error() string {
+ return fmt.Sprintf("[PUT /recorder/{id}][%d] putRecorderIdFailure %+v", 500, o.Payload)
+}
+
+func (o *PutRecorderIDFailure) String() string {
+ return fmt.Sprintf("[PUT /recorder/{id}][%d] putRecorderIdFailure %+v", 500, o.Payload)
+}
+
+func (o *PutRecorderIDFailure) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *PutRecorderIDFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/recorder/recorder_client.go b/vendor/github.com/cilium/cilium/api/v1/client/recorder/recorder_client.go
new file mode 100644
index 000000000..883e304be
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/recorder/recorder_client.go
@@ -0,0 +1,243 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package recorder
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+)
+
+// New creates a new recorder API client.
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
+ return &Client{transport: transport, formats: formats}
+}
+
+/*
+Client for recorder API
+*/
+type Client struct {
+ transport runtime.ClientTransport
+ formats strfmt.Registry
+}
+
+// ClientOption is the option for Client methods
+type ClientOption func(*runtime.ClientOperation)
+
+// ClientService is the interface for Client methods
+type ClientService interface {
+ DeleteRecorderID(params *DeleteRecorderIDParams, opts ...ClientOption) (*DeleteRecorderIDOK, error)
+
+ GetRecorder(params *GetRecorderParams, opts ...ClientOption) (*GetRecorderOK, error)
+
+ GetRecorderID(params *GetRecorderIDParams, opts ...ClientOption) (*GetRecorderIDOK, error)
+
+ GetRecorderMasks(params *GetRecorderMasksParams, opts ...ClientOption) (*GetRecorderMasksOK, error)
+
+ PutRecorderID(params *PutRecorderIDParams, opts ...ClientOption) (*PutRecorderIDOK, *PutRecorderIDCreated, error)
+
+ SetTransport(transport runtime.ClientTransport)
+}
+
+/*
+DeleteRecorderID deletes a recorder
+*/
+func (a *Client) DeleteRecorderID(params *DeleteRecorderIDParams, opts ...ClientOption) (*DeleteRecorderIDOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewDeleteRecorderIDParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "DeleteRecorderID",
+ Method: "DELETE",
+ PathPattern: "/recorder/{id}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &DeleteRecorderIDReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*DeleteRecorderIDOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for DeleteRecorderID: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+GetRecorder retrieves list of all recorders
+*/
+func (a *Client) GetRecorder(params *GetRecorderParams, opts ...ClientOption) (*GetRecorderOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetRecorderParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetRecorder",
+ Method: "GET",
+ PathPattern: "/recorder",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetRecorderReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetRecorderOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetRecorder: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+GetRecorderID retrieves configuration of a recorder
+*/
+func (a *Client) GetRecorderID(params *GetRecorderIDParams, opts ...ClientOption) (*GetRecorderIDOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetRecorderIDParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetRecorderID",
+ Method: "GET",
+ PathPattern: "/recorder/{id}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetRecorderIDReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetRecorderIDOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetRecorderID: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+GetRecorderMasks retrieves list of all recorder masks
+*/
+func (a *Client) GetRecorderMasks(params *GetRecorderMasksParams, opts ...ClientOption) (*GetRecorderMasksOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetRecorderMasksParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetRecorderMasks",
+ Method: "GET",
+ PathPattern: "/recorder/masks",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetRecorderMasksReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetRecorderMasksOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetRecorderMasks: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+PutRecorderID creates or update recorder
+*/
+func (a *Client) PutRecorderID(params *PutRecorderIDParams, opts ...ClientOption) (*PutRecorderIDOK, *PutRecorderIDCreated, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewPutRecorderIDParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "PutRecorderID",
+ Method: "PUT",
+ PathPattern: "/recorder/{id}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &PutRecorderIDReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, nil, err
+ }
+ switch value := result.(type) {
+ case *PutRecorderIDOK:
+ return value, nil, nil
+ case *PutRecorderIDCreated:
+ return nil, value, nil
+ }
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for recorder: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+ a.transport = transport
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/service/delete_service_id_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/service/delete_service_id_parameters.go
new file mode 100644
index 000000000..a464423c8
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/service/delete_service_id_parameters.go
@@ -0,0 +1,155 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package service
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// NewDeleteServiceIDParams creates a new DeleteServiceIDParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewDeleteServiceIDParams() *DeleteServiceIDParams {
+ return &DeleteServiceIDParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewDeleteServiceIDParamsWithTimeout creates a new DeleteServiceIDParams object
+// with the ability to set a timeout on a request.
+func NewDeleteServiceIDParamsWithTimeout(timeout time.Duration) *DeleteServiceIDParams {
+ return &DeleteServiceIDParams{
+ timeout: timeout,
+ }
+}
+
+// NewDeleteServiceIDParamsWithContext creates a new DeleteServiceIDParams object
+// with the ability to set a context for a request.
+func NewDeleteServiceIDParamsWithContext(ctx context.Context) *DeleteServiceIDParams {
+ return &DeleteServiceIDParams{
+ Context: ctx,
+ }
+}
+
+// NewDeleteServiceIDParamsWithHTTPClient creates a new DeleteServiceIDParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewDeleteServiceIDParamsWithHTTPClient(client *http.Client) *DeleteServiceIDParams {
+ return &DeleteServiceIDParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+DeleteServiceIDParams contains all the parameters to send to the API endpoint
+
+ for the delete service ID operation.
+
+ Typically these are written to a http.Request.
+*/
+type DeleteServiceIDParams struct {
+
+ /* ID.
+
+ ID of service
+ */
+ ID int64
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the delete service ID params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteServiceIDParams) WithDefaults() *DeleteServiceIDParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the delete service ID params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *DeleteServiceIDParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the delete service ID params
+func (o *DeleteServiceIDParams) WithTimeout(timeout time.Duration) *DeleteServiceIDParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the delete service ID params
+func (o *DeleteServiceIDParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the delete service ID params
+func (o *DeleteServiceIDParams) WithContext(ctx context.Context) *DeleteServiceIDParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the delete service ID params
+func (o *DeleteServiceIDParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the delete service ID params
+func (o *DeleteServiceIDParams) WithHTTPClient(client *http.Client) *DeleteServiceIDParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the delete service ID params
+func (o *DeleteServiceIDParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithID adds the id to the delete service ID params
+func (o *DeleteServiceIDParams) WithID(id int64) *DeleteServiceIDParams {
+ o.SetID(id)
+ return o
+}
+
+// SetID adds the id to the delete service ID params
+func (o *DeleteServiceIDParams) SetID(id int64) {
+ o.ID = id
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *DeleteServiceIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param id
+ if err := r.SetPathParam("id", swag.FormatInt64(o.ID)); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/service/delete_service_id_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/service/delete_service_id_responses.go
new file mode 100644
index 000000000..21926d9c4
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/service/delete_service_id_responses.go
@@ -0,0 +1,270 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package service
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// DeleteServiceIDReader is a Reader for the DeleteServiceID structure.
+type DeleteServiceIDReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *DeleteServiceIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewDeleteServiceIDOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 403:
+ result := NewDeleteServiceIDForbidden()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 404:
+ result := NewDeleteServiceIDNotFound()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 500:
+ result := NewDeleteServiceIDFailure()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewDeleteServiceIDOK creates a DeleteServiceIDOK with default headers values
+func NewDeleteServiceIDOK() *DeleteServiceIDOK {
+ return &DeleteServiceIDOK{}
+}
+
+/*
+DeleteServiceIDOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type DeleteServiceIDOK struct {
+}
+
+// IsSuccess returns true when this delete service Id o k response has a 2xx status code
+func (o *DeleteServiceIDOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this delete service Id o k response has a 3xx status code
+func (o *DeleteServiceIDOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete service Id o k response has a 4xx status code
+func (o *DeleteServiceIDOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this delete service Id o k response has a 5xx status code
+func (o *DeleteServiceIDOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this delete service Id o k response a status code equal to that given
+func (o *DeleteServiceIDOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *DeleteServiceIDOK) Error() string {
+ return fmt.Sprintf("[DELETE /service/{id}][%d] deleteServiceIdOK ", 200)
+}
+
+func (o *DeleteServiceIDOK) String() string {
+ return fmt.Sprintf("[DELETE /service/{id}][%d] deleteServiceIdOK ", 200)
+}
+
+func (o *DeleteServiceIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewDeleteServiceIDForbidden creates a DeleteServiceIDForbidden with default headers values
+func NewDeleteServiceIDForbidden() *DeleteServiceIDForbidden {
+ return &DeleteServiceIDForbidden{}
+}
+
+/*
+DeleteServiceIDForbidden describes a response with status code 403, with default header values.
+
+Forbidden
+*/
+type DeleteServiceIDForbidden struct {
+}
+
+// IsSuccess returns true when this delete service Id forbidden response has a 2xx status code
+func (o *DeleteServiceIDForbidden) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this delete service Id forbidden response has a 3xx status code
+func (o *DeleteServiceIDForbidden) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete service Id forbidden response has a 4xx status code
+func (o *DeleteServiceIDForbidden) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this delete service Id forbidden response has a 5xx status code
+func (o *DeleteServiceIDForbidden) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this delete service Id forbidden response a status code equal to that given
+func (o *DeleteServiceIDForbidden) IsCode(code int) bool {
+ return code == 403
+}
+
+func (o *DeleteServiceIDForbidden) Error() string {
+ return fmt.Sprintf("[DELETE /service/{id}][%d] deleteServiceIdForbidden ", 403)
+}
+
+func (o *DeleteServiceIDForbidden) String() string {
+ return fmt.Sprintf("[DELETE /service/{id}][%d] deleteServiceIdForbidden ", 403)
+}
+
+func (o *DeleteServiceIDForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewDeleteServiceIDNotFound creates a DeleteServiceIDNotFound with default headers values
+func NewDeleteServiceIDNotFound() *DeleteServiceIDNotFound {
+ return &DeleteServiceIDNotFound{}
+}
+
+/*
+DeleteServiceIDNotFound describes a response with status code 404, with default header values.
+
+Service not found
+*/
+type DeleteServiceIDNotFound struct {
+}
+
+// IsSuccess returns true when this delete service Id not found response has a 2xx status code
+func (o *DeleteServiceIDNotFound) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this delete service Id not found response has a 3xx status code
+func (o *DeleteServiceIDNotFound) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete service Id not found response has a 4xx status code
+func (o *DeleteServiceIDNotFound) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this delete service Id not found response has a 5xx status code
+func (o *DeleteServiceIDNotFound) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this delete service Id not found response a status code equal to that given
+func (o *DeleteServiceIDNotFound) IsCode(code int) bool {
+ return code == 404
+}
+
+func (o *DeleteServiceIDNotFound) Error() string {
+ return fmt.Sprintf("[DELETE /service/{id}][%d] deleteServiceIdNotFound ", 404)
+}
+
+func (o *DeleteServiceIDNotFound) String() string {
+ return fmt.Sprintf("[DELETE /service/{id}][%d] deleteServiceIdNotFound ", 404)
+}
+
+func (o *DeleteServiceIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewDeleteServiceIDFailure creates a DeleteServiceIDFailure with default headers values
+func NewDeleteServiceIDFailure() *DeleteServiceIDFailure {
+ return &DeleteServiceIDFailure{}
+}
+
+/*
+DeleteServiceIDFailure describes a response with status code 500, with default header values.
+
+Service deletion failed
+*/
+type DeleteServiceIDFailure struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this delete service Id failure response has a 2xx status code
+func (o *DeleteServiceIDFailure) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this delete service Id failure response has a 3xx status code
+func (o *DeleteServiceIDFailure) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this delete service Id failure response has a 4xx status code
+func (o *DeleteServiceIDFailure) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this delete service Id failure response has a 5xx status code
+func (o *DeleteServiceIDFailure) IsServerError() bool {
+ return true
+}
+
+// IsCode returns true when this delete service Id failure response a status code equal to that given
+func (o *DeleteServiceIDFailure) IsCode(code int) bool {
+ return code == 500
+}
+
+func (o *DeleteServiceIDFailure) Error() string {
+ return fmt.Sprintf("[DELETE /service/{id}][%d] deleteServiceIdFailure %+v", 500, o.Payload)
+}
+
+func (o *DeleteServiceIDFailure) String() string {
+ return fmt.Sprintf("[DELETE /service/{id}][%d] deleteServiceIdFailure %+v", 500, o.Payload)
+}
+
+func (o *DeleteServiceIDFailure) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *DeleteServiceIDFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/service/get_lrp_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/service/get_lrp_parameters.go
new file mode 100644
index 000000000..beb277ecd
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/service/get_lrp_parameters.go
@@ -0,0 +1,131 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package service
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetLrpParams creates a new GetLrpParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetLrpParams() *GetLrpParams {
+ return &GetLrpParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetLrpParamsWithTimeout creates a new GetLrpParams object
+// with the ability to set a timeout on a request.
+func NewGetLrpParamsWithTimeout(timeout time.Duration) *GetLrpParams {
+ return &GetLrpParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetLrpParamsWithContext creates a new GetLrpParams object
+// with the ability to set a context for a request.
+func NewGetLrpParamsWithContext(ctx context.Context) *GetLrpParams {
+ return &GetLrpParams{
+ Context: ctx,
+ }
+}
+
+// NewGetLrpParamsWithHTTPClient creates a new GetLrpParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetLrpParamsWithHTTPClient(client *http.Client) *GetLrpParams {
+ return &GetLrpParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetLrpParams contains all the parameters to send to the API endpoint
+
+ for the get lrp operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetLrpParams struct {
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get lrp params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetLrpParams) WithDefaults() *GetLrpParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get lrp params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetLrpParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get lrp params
+func (o *GetLrpParams) WithTimeout(timeout time.Duration) *GetLrpParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get lrp params
+func (o *GetLrpParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get lrp params
+func (o *GetLrpParams) WithContext(ctx context.Context) *GetLrpParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get lrp params
+func (o *GetLrpParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get lrp params
+func (o *GetLrpParams) WithHTTPClient(client *http.Client) *GetLrpParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get lrp params
+func (o *GetLrpParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetLrpParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/service/get_lrp_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/service/get_lrp_responses.go
new file mode 100644
index 000000000..ac5974ce5
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/service/get_lrp_responses.go
@@ -0,0 +1,99 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package service
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// GetLrpReader is a Reader for the GetLrp structure.
+type GetLrpReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetLrpReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetLrpOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetLrpOK creates a GetLrpOK with default headers values
+func NewGetLrpOK() *GetLrpOK {
+ return &GetLrpOK{}
+}
+
+/*
+GetLrpOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetLrpOK struct {
+ Payload []*models.LRPSpec
+}
+
+// IsSuccess returns true when this get lrp o k response has a 2xx status code
+func (o *GetLrpOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get lrp o k response has a 3xx status code
+func (o *GetLrpOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get lrp o k response has a 4xx status code
+func (o *GetLrpOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get lrp o k response has a 5xx status code
+func (o *GetLrpOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get lrp o k response a status code equal to that given
+func (o *GetLrpOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetLrpOK) Error() string {
+ return fmt.Sprintf("[GET /lrp][%d] getLrpOK %+v", 200, o.Payload)
+}
+
+func (o *GetLrpOK) String() string {
+ return fmt.Sprintf("[GET /lrp][%d] getLrpOK %+v", 200, o.Payload)
+}
+
+func (o *GetLrpOK) GetPayload() []*models.LRPSpec {
+ return o.Payload
+}
+
+func (o *GetLrpOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/service/get_service_id_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/service/get_service_id_parameters.go
new file mode 100644
index 000000000..dac152aae
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/service/get_service_id_parameters.go
@@ -0,0 +1,155 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package service
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// NewGetServiceIDParams creates a new GetServiceIDParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetServiceIDParams() *GetServiceIDParams {
+ return &GetServiceIDParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetServiceIDParamsWithTimeout creates a new GetServiceIDParams object
+// with the ability to set a timeout on a request.
+func NewGetServiceIDParamsWithTimeout(timeout time.Duration) *GetServiceIDParams {
+ return &GetServiceIDParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetServiceIDParamsWithContext creates a new GetServiceIDParams object
+// with the ability to set a context for a request.
+func NewGetServiceIDParamsWithContext(ctx context.Context) *GetServiceIDParams {
+ return &GetServiceIDParams{
+ Context: ctx,
+ }
+}
+
+// NewGetServiceIDParamsWithHTTPClient creates a new GetServiceIDParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetServiceIDParamsWithHTTPClient(client *http.Client) *GetServiceIDParams {
+ return &GetServiceIDParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetServiceIDParams contains all the parameters to send to the API endpoint
+
+ for the get service ID operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetServiceIDParams struct {
+
+ /* ID.
+
+ ID of service
+ */
+ ID int64
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get service ID params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetServiceIDParams) WithDefaults() *GetServiceIDParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get service ID params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetServiceIDParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get service ID params
+func (o *GetServiceIDParams) WithTimeout(timeout time.Duration) *GetServiceIDParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get service ID params
+func (o *GetServiceIDParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get service ID params
+func (o *GetServiceIDParams) WithContext(ctx context.Context) *GetServiceIDParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get service ID params
+func (o *GetServiceIDParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get service ID params
+func (o *GetServiceIDParams) WithHTTPClient(client *http.Client) *GetServiceIDParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get service ID params
+func (o *GetServiceIDParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithID adds the id to the get service ID params
+func (o *GetServiceIDParams) WithID(id int64) *GetServiceIDParams {
+ o.SetID(id)
+ return o
+}
+
+// SetID adds the id to the get service ID params
+func (o *GetServiceIDParams) SetID(id int64) {
+ o.ID = id
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetServiceIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ // path param id
+ if err := r.SetPathParam("id", swag.FormatInt64(o.ID)); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/service/get_service_id_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/service/get_service_id_responses.go
new file mode 100644
index 000000000..4b198ced9
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/service/get_service_id_responses.go
@@ -0,0 +1,158 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package service
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// GetServiceIDReader is a Reader for the GetServiceID structure.
+type GetServiceIDReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetServiceIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetServiceIDOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 404:
+ result := NewGetServiceIDNotFound()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetServiceIDOK creates a GetServiceIDOK with default headers values
+func NewGetServiceIDOK() *GetServiceIDOK {
+ return &GetServiceIDOK{}
+}
+
+/*
+GetServiceIDOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetServiceIDOK struct {
+ Payload *models.Service
+}
+
+// IsSuccess returns true when this get service Id o k response has a 2xx status code
+func (o *GetServiceIDOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get service Id o k response has a 3xx status code
+func (o *GetServiceIDOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get service Id o k response has a 4xx status code
+func (o *GetServiceIDOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get service Id o k response has a 5xx status code
+func (o *GetServiceIDOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get service Id o k response a status code equal to that given
+func (o *GetServiceIDOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetServiceIDOK) Error() string {
+ return fmt.Sprintf("[GET /service/{id}][%d] getServiceIdOK %+v", 200, o.Payload)
+}
+
+func (o *GetServiceIDOK) String() string {
+ return fmt.Sprintf("[GET /service/{id}][%d] getServiceIdOK %+v", 200, o.Payload)
+}
+
+func (o *GetServiceIDOK) GetPayload() *models.Service {
+ return o.Payload
+}
+
+func (o *GetServiceIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.Service)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetServiceIDNotFound creates a GetServiceIDNotFound with default headers values
+func NewGetServiceIDNotFound() *GetServiceIDNotFound {
+ return &GetServiceIDNotFound{}
+}
+
+/*
+GetServiceIDNotFound describes a response with status code 404, with default header values.
+
+Service not found
+*/
+type GetServiceIDNotFound struct {
+}
+
+// IsSuccess returns true when this get service Id not found response has a 2xx status code
+func (o *GetServiceIDNotFound) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get service Id not found response has a 3xx status code
+func (o *GetServiceIDNotFound) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get service Id not found response has a 4xx status code
+func (o *GetServiceIDNotFound) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this get service Id not found response has a 5xx status code
+func (o *GetServiceIDNotFound) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get service Id not found response a status code equal to that given
+func (o *GetServiceIDNotFound) IsCode(code int) bool {
+ return code == 404
+}
+
+func (o *GetServiceIDNotFound) Error() string {
+ return fmt.Sprintf("[GET /service/{id}][%d] getServiceIdNotFound ", 404)
+}
+
+func (o *GetServiceIDNotFound) String() string {
+ return fmt.Sprintf("[GET /service/{id}][%d] getServiceIdNotFound ", 404)
+}
+
+func (o *GetServiceIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/service/get_service_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/service/get_service_parameters.go
new file mode 100644
index 000000000..ff7dfcb84
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/service/get_service_parameters.go
@@ -0,0 +1,131 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package service
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetServiceParams creates a new GetServiceParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetServiceParams() *GetServiceParams {
+ return &GetServiceParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetServiceParamsWithTimeout creates a new GetServiceParams object
+// with the ability to set a timeout on a request.
+func NewGetServiceParamsWithTimeout(timeout time.Duration) *GetServiceParams {
+ return &GetServiceParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetServiceParamsWithContext creates a new GetServiceParams object
+// with the ability to set a context for a request.
+func NewGetServiceParamsWithContext(ctx context.Context) *GetServiceParams {
+ return &GetServiceParams{
+ Context: ctx,
+ }
+}
+
+// NewGetServiceParamsWithHTTPClient creates a new GetServiceParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetServiceParamsWithHTTPClient(client *http.Client) *GetServiceParams {
+ return &GetServiceParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetServiceParams contains all the parameters to send to the API endpoint
+
+ for the get service operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetServiceParams struct {
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get service params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetServiceParams) WithDefaults() *GetServiceParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get service params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetServiceParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get service params
+func (o *GetServiceParams) WithTimeout(timeout time.Duration) *GetServiceParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get service params
+func (o *GetServiceParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get service params
+func (o *GetServiceParams) WithContext(ctx context.Context) *GetServiceParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get service params
+func (o *GetServiceParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get service params
+func (o *GetServiceParams) WithHTTPClient(client *http.Client) *GetServiceParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get service params
+func (o *GetServiceParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetServiceParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/service/get_service_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/service/get_service_responses.go
new file mode 100644
index 000000000..dbf75ba84
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/service/get_service_responses.go
@@ -0,0 +1,99 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package service
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// GetServiceReader is a Reader for the GetService structure.
+type GetServiceReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetServiceReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetServiceOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetServiceOK creates a GetServiceOK with default headers values
+func NewGetServiceOK() *GetServiceOK {
+ return &GetServiceOK{}
+}
+
+/*
+GetServiceOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetServiceOK struct {
+ Payload []*models.Service
+}
+
+// IsSuccess returns true when this get service o k response has a 2xx status code
+func (o *GetServiceOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get service o k response has a 3xx status code
+func (o *GetServiceOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get service o k response has a 4xx status code
+func (o *GetServiceOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get service o k response has a 5xx status code
+func (o *GetServiceOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get service o k response a status code equal to that given
+func (o *GetServiceOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetServiceOK) Error() string {
+ return fmt.Sprintf("[GET /service][%d] getServiceOK %+v", 200, o.Payload)
+}
+
+func (o *GetServiceOK) String() string {
+ return fmt.Sprintf("[GET /service][%d] getServiceOK %+v", 200, o.Payload)
+}
+
+func (o *GetServiceOK) GetPayload() []*models.Service {
+ return o.Payload
+}
+
+func (o *GetServiceOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/service/put_service_id_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/service/put_service_id_parameters.go
new file mode 100644
index 000000000..b1877e4b7
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/service/put_service_id_parameters.go
@@ -0,0 +1,179 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package service
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// NewPutServiceIDParams creates a new PutServiceIDParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewPutServiceIDParams() *PutServiceIDParams {
+ return &PutServiceIDParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewPutServiceIDParamsWithTimeout creates a new PutServiceIDParams object
+// with the ability to set a timeout on a request.
+func NewPutServiceIDParamsWithTimeout(timeout time.Duration) *PutServiceIDParams {
+ return &PutServiceIDParams{
+ timeout: timeout,
+ }
+}
+
+// NewPutServiceIDParamsWithContext creates a new PutServiceIDParams object
+// with the ability to set a context for a request.
+func NewPutServiceIDParamsWithContext(ctx context.Context) *PutServiceIDParams {
+ return &PutServiceIDParams{
+ Context: ctx,
+ }
+}
+
+// NewPutServiceIDParamsWithHTTPClient creates a new PutServiceIDParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewPutServiceIDParamsWithHTTPClient(client *http.Client) *PutServiceIDParams {
+ return &PutServiceIDParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+PutServiceIDParams contains all the parameters to send to the API endpoint
+
+ for the put service ID operation.
+
+ Typically these are written to a http.Request.
+*/
+type PutServiceIDParams struct {
+
+ /* Config.
+
+ Service configuration
+ */
+ Config *models.ServiceSpec
+
+ /* ID.
+
+ ID of service
+ */
+ ID int64
+
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the put service ID params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *PutServiceIDParams) WithDefaults() *PutServiceIDParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the put service ID params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *PutServiceIDParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the put service ID params
+func (o *PutServiceIDParams) WithTimeout(timeout time.Duration) *PutServiceIDParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the put service ID params
+func (o *PutServiceIDParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the put service ID params
+func (o *PutServiceIDParams) WithContext(ctx context.Context) *PutServiceIDParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the put service ID params
+func (o *PutServiceIDParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the put service ID params
+func (o *PutServiceIDParams) WithHTTPClient(client *http.Client) *PutServiceIDParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the put service ID params
+func (o *PutServiceIDParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WithConfig adds the config to the put service ID params
+func (o *PutServiceIDParams) WithConfig(config *models.ServiceSpec) *PutServiceIDParams {
+ o.SetConfig(config)
+ return o
+}
+
+// SetConfig adds the config to the put service ID params
+func (o *PutServiceIDParams) SetConfig(config *models.ServiceSpec) {
+ o.Config = config
+}
+
+// WithID adds the id to the put service ID params
+func (o *PutServiceIDParams) WithID(id int64) *PutServiceIDParams {
+ o.SetID(id)
+ return o
+}
+
+// SetID adds the id to the put service ID params
+func (o *PutServiceIDParams) SetID(id int64) {
+ o.ID = id
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *PutServiceIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+ if o.Config != nil {
+ if err := r.SetBodyParam(o.Config); err != nil {
+ return err
+ }
+ }
+
+ // path param id
+ if err := r.SetPathParam("id", swag.FormatInt64(o.ID)); err != nil {
+ return err
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/service/put_service_id_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/service/put_service_id_responses.go
new file mode 100644
index 000000000..af85fa32a
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/service/put_service_id_responses.go
@@ -0,0 +1,471 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package service
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// PutServiceIDReader is a Reader for the PutServiceID structure.
+type PutServiceIDReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *PutServiceIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewPutServiceIDOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 201:
+ result := NewPutServiceIDCreated()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 403:
+ result := NewPutServiceIDForbidden()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 460:
+ result := NewPutServiceIDInvalidFrontend()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 461:
+ result := NewPutServiceIDInvalidBackend()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 500:
+ result := NewPutServiceIDFailure()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 501:
+ result := NewPutServiceIDUpdateBackendFailure()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewPutServiceIDOK creates a PutServiceIDOK with default headers values
+func NewPutServiceIDOK() *PutServiceIDOK {
+ return &PutServiceIDOK{}
+}
+
+/*
+PutServiceIDOK describes a response with status code 200, with default header values.
+
+Updated
+*/
+type PutServiceIDOK struct {
+}
+
+// IsSuccess returns true when this put service Id o k response has a 2xx status code
+func (o *PutServiceIDOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this put service Id o k response has a 3xx status code
+func (o *PutServiceIDOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this put service Id o k response has a 4xx status code
+func (o *PutServiceIDOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this put service Id o k response has a 5xx status code
+func (o *PutServiceIDOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this put service Id o k response a status code equal to that given
+func (o *PutServiceIDOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *PutServiceIDOK) Error() string {
+ return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdOK ", 200)
+}
+
+func (o *PutServiceIDOK) String() string {
+ return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdOK ", 200)
+}
+
+func (o *PutServiceIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewPutServiceIDCreated creates a PutServiceIDCreated with default headers values
+func NewPutServiceIDCreated() *PutServiceIDCreated {
+ return &PutServiceIDCreated{}
+}
+
+/*
+PutServiceIDCreated describes a response with status code 201, with default header values.
+
+Created
+*/
+type PutServiceIDCreated struct {
+}
+
+// IsSuccess returns true when this put service Id created response has a 2xx status code
+func (o *PutServiceIDCreated) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this put service Id created response has a 3xx status code
+func (o *PutServiceIDCreated) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this put service Id created response has a 4xx status code
+func (o *PutServiceIDCreated) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this put service Id created response has a 5xx status code
+func (o *PutServiceIDCreated) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this put service Id created response a status code equal to that given
+func (o *PutServiceIDCreated) IsCode(code int) bool {
+ return code == 201
+}
+
+func (o *PutServiceIDCreated) Error() string {
+ return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdCreated ", 201)
+}
+
+func (o *PutServiceIDCreated) String() string {
+ return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdCreated ", 201)
+}
+
+func (o *PutServiceIDCreated) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewPutServiceIDForbidden creates a PutServiceIDForbidden with default headers values
+func NewPutServiceIDForbidden() *PutServiceIDForbidden {
+ return &PutServiceIDForbidden{}
+}
+
+/*
+PutServiceIDForbidden describes a response with status code 403, with default header values.
+
+Forbidden
+*/
+type PutServiceIDForbidden struct {
+}
+
+// IsSuccess returns true when this put service Id forbidden response has a 2xx status code
+func (o *PutServiceIDForbidden) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this put service Id forbidden response has a 3xx status code
+func (o *PutServiceIDForbidden) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this put service Id forbidden response has a 4xx status code
+func (o *PutServiceIDForbidden) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this put service Id forbidden response has a 5xx status code
+func (o *PutServiceIDForbidden) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this put service Id forbidden response a status code equal to that given
+func (o *PutServiceIDForbidden) IsCode(code int) bool {
+ return code == 403
+}
+
+func (o *PutServiceIDForbidden) Error() string {
+ return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdForbidden ", 403)
+}
+
+func (o *PutServiceIDForbidden) String() string {
+ return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdForbidden ", 403)
+}
+
+func (o *PutServiceIDForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewPutServiceIDInvalidFrontend creates a PutServiceIDInvalidFrontend with default headers values
+func NewPutServiceIDInvalidFrontend() *PutServiceIDInvalidFrontend {
+ return &PutServiceIDInvalidFrontend{}
+}
+
+/*
+PutServiceIDInvalidFrontend describes a response with status code 460, with default header values.
+
+Invalid frontend in service configuration
+*/
+type PutServiceIDInvalidFrontend struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this put service Id invalid frontend response has a 2xx status code
+func (o *PutServiceIDInvalidFrontend) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this put service Id invalid frontend response has a 3xx status code
+func (o *PutServiceIDInvalidFrontend) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this put service Id invalid frontend response has a 4xx status code
+func (o *PutServiceIDInvalidFrontend) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this put service Id invalid frontend response has a 5xx status code
+func (o *PutServiceIDInvalidFrontend) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this put service Id invalid frontend response a status code equal to that given
+func (o *PutServiceIDInvalidFrontend) IsCode(code int) bool {
+ return code == 460
+}
+
+func (o *PutServiceIDInvalidFrontend) Error() string {
+ return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdInvalidFrontend %+v", 460, o.Payload)
+}
+
+func (o *PutServiceIDInvalidFrontend) String() string {
+ return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdInvalidFrontend %+v", 460, o.Payload)
+}
+
+func (o *PutServiceIDInvalidFrontend) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *PutServiceIDInvalidFrontend) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewPutServiceIDInvalidBackend creates a PutServiceIDInvalidBackend with default headers values
+func NewPutServiceIDInvalidBackend() *PutServiceIDInvalidBackend {
+ return &PutServiceIDInvalidBackend{}
+}
+
+/*
+PutServiceIDInvalidBackend describes a response with status code 461, with default header values.
+
+Invalid backend in service configuration
+*/
+type PutServiceIDInvalidBackend struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this put service Id invalid backend response has a 2xx status code
+func (o *PutServiceIDInvalidBackend) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this put service Id invalid backend response has a 3xx status code
+func (o *PutServiceIDInvalidBackend) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this put service Id invalid backend response has a 4xx status code
+func (o *PutServiceIDInvalidBackend) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this put service Id invalid backend response has a 5xx status code
+func (o *PutServiceIDInvalidBackend) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this put service Id invalid backend response a status code equal to that given
+func (o *PutServiceIDInvalidBackend) IsCode(code int) bool {
+ return code == 461
+}
+
+func (o *PutServiceIDInvalidBackend) Error() string {
+ return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdInvalidBackend %+v", 461, o.Payload)
+}
+
+func (o *PutServiceIDInvalidBackend) String() string {
+ return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdInvalidBackend %+v", 461, o.Payload)
+}
+
+func (o *PutServiceIDInvalidBackend) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *PutServiceIDInvalidBackend) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewPutServiceIDFailure creates a PutServiceIDFailure with default headers values
+func NewPutServiceIDFailure() *PutServiceIDFailure {
+ return &PutServiceIDFailure{}
+}
+
+/*
+PutServiceIDFailure describes a response with status code 500, with default header values.
+
+Error while creating service
+*/
+type PutServiceIDFailure struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this put service Id failure response has a 2xx status code
+func (o *PutServiceIDFailure) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this put service Id failure response has a 3xx status code
+func (o *PutServiceIDFailure) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this put service Id failure response has a 4xx status code
+func (o *PutServiceIDFailure) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this put service Id failure response has a 5xx status code
+func (o *PutServiceIDFailure) IsServerError() bool {
+ return true
+}
+
+// IsCode returns true when this put service Id failure response a status code equal to that given
+func (o *PutServiceIDFailure) IsCode(code int) bool {
+ return code == 500
+}
+
+func (o *PutServiceIDFailure) Error() string {
+ return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdFailure %+v", 500, o.Payload)
+}
+
+func (o *PutServiceIDFailure) String() string {
+ return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdFailure %+v", 500, o.Payload)
+}
+
+func (o *PutServiceIDFailure) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *PutServiceIDFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewPutServiceIDUpdateBackendFailure creates a PutServiceIDUpdateBackendFailure with default headers values
+func NewPutServiceIDUpdateBackendFailure() *PutServiceIDUpdateBackendFailure {
+ return &PutServiceIDUpdateBackendFailure{}
+}
+
+/*
+PutServiceIDUpdateBackendFailure describes a response with status code 501, with default header values.
+
+Error while updating backend states
+*/
+type PutServiceIDUpdateBackendFailure struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this put service Id update backend failure response has a 2xx status code
+func (o *PutServiceIDUpdateBackendFailure) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this put service Id update backend failure response has a 3xx status code
+func (o *PutServiceIDUpdateBackendFailure) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this put service Id update backend failure response has a 4xx status code
+func (o *PutServiceIDUpdateBackendFailure) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this put service Id update backend failure response has a 5xx status code
+func (o *PutServiceIDUpdateBackendFailure) IsServerError() bool {
+ return true
+}
+
+// IsCode returns true when this put service Id update backend failure response a status code equal to that given
+func (o *PutServiceIDUpdateBackendFailure) IsCode(code int) bool {
+ return code == 501
+}
+
+func (o *PutServiceIDUpdateBackendFailure) Error() string {
+ return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdUpdateBackendFailure %+v", 501, o.Payload)
+}
+
+func (o *PutServiceIDUpdateBackendFailure) String() string {
+ return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdUpdateBackendFailure %+v", 501, o.Payload)
+}
+
+func (o *PutServiceIDUpdateBackendFailure) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *PutServiceIDUpdateBackendFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/service/service_client.go b/vendor/github.com/cilium/cilium/api/v1/client/service/service_client.go
new file mode 100644
index 000000000..332799a98
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/service/service_client.go
@@ -0,0 +1,243 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package service
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+)
+
+// New creates a new service API client.
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
+ return &Client{transport: transport, formats: formats}
+}
+
+/*
+Client for service API
+*/
+type Client struct {
+ transport runtime.ClientTransport
+ formats strfmt.Registry
+}
+
+// ClientOption is the option for Client methods
+type ClientOption func(*runtime.ClientOperation)
+
+// ClientService is the interface for Client methods
+type ClientService interface {
+ DeleteServiceID(params *DeleteServiceIDParams, opts ...ClientOption) (*DeleteServiceIDOK, error)
+
+ GetLrp(params *GetLrpParams, opts ...ClientOption) (*GetLrpOK, error)
+
+ GetService(params *GetServiceParams, opts ...ClientOption) (*GetServiceOK, error)
+
+ GetServiceID(params *GetServiceIDParams, opts ...ClientOption) (*GetServiceIDOK, error)
+
+ PutServiceID(params *PutServiceIDParams, opts ...ClientOption) (*PutServiceIDOK, *PutServiceIDCreated, error)
+
+ SetTransport(transport runtime.ClientTransport)
+}
+
+/*
+DeleteServiceID deletes a service
+*/
+func (a *Client) DeleteServiceID(params *DeleteServiceIDParams, opts ...ClientOption) (*DeleteServiceIDOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewDeleteServiceIDParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "DeleteServiceID",
+ Method: "DELETE",
+ PathPattern: "/service/{id}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &DeleteServiceIDReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*DeleteServiceIDOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for DeleteServiceID: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+GetLrp retrieves list of all local redirect policies
+*/
+func (a *Client) GetLrp(params *GetLrpParams, opts ...ClientOption) (*GetLrpOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetLrpParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetLrp",
+ Method: "GET",
+ PathPattern: "/lrp",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetLrpReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetLrpOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetLrp: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+GetService retrieves list of all services
+*/
+func (a *Client) GetService(params *GetServiceParams, opts ...ClientOption) (*GetServiceOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetServiceParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetService",
+ Method: "GET",
+ PathPattern: "/service",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetServiceReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetServiceOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetService: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+GetServiceID retrieves configuration of a service
+*/
+func (a *Client) GetServiceID(params *GetServiceIDParams, opts ...ClientOption) (*GetServiceIDOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetServiceIDParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetServiceID",
+ Method: "GET",
+ PathPattern: "/service/{id}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetServiceIDReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetServiceIDOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetServiceID: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+PutServiceID creates or update service
+*/
+func (a *Client) PutServiceID(params *PutServiceIDParams, opts ...ClientOption) (*PutServiceIDOK, *PutServiceIDCreated, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewPutServiceIDParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "PutServiceID",
+ Method: "PUT",
+ PathPattern: "/service/{id}",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &PutServiceIDReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, nil, err
+ }
+ switch value := result.(type) {
+ case *PutServiceIDOK:
+ return value, nil, nil
+ case *PutServiceIDCreated:
+ return nil, value, nil
+ }
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for service: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+ a.transport = transport
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/statedb/get_statedb_dump_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/statedb/get_statedb_dump_parameters.go
new file mode 100644
index 000000000..fe1c3ff1a
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/statedb/get_statedb_dump_parameters.go
@@ -0,0 +1,131 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package statedb
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetStatedbDumpParams creates a new GetStatedbDumpParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetStatedbDumpParams() *GetStatedbDumpParams {
+ return &GetStatedbDumpParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetStatedbDumpParamsWithTimeout creates a new GetStatedbDumpParams object
+// with the ability to set a timeout on a request.
+func NewGetStatedbDumpParamsWithTimeout(timeout time.Duration) *GetStatedbDumpParams {
+ return &GetStatedbDumpParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetStatedbDumpParamsWithContext creates a new GetStatedbDumpParams object
+// with the ability to set a context for a request.
+func NewGetStatedbDumpParamsWithContext(ctx context.Context) *GetStatedbDumpParams {
+ return &GetStatedbDumpParams{
+ Context: ctx,
+ }
+}
+
+// NewGetStatedbDumpParamsWithHTTPClient creates a new GetStatedbDumpParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetStatedbDumpParamsWithHTTPClient(client *http.Client) *GetStatedbDumpParams {
+ return &GetStatedbDumpParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetStatedbDumpParams contains all the parameters to send to the API endpoint
+
+ for the get statedb dump operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetStatedbDumpParams struct {
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get statedb dump params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetStatedbDumpParams) WithDefaults() *GetStatedbDumpParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get statedb dump params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetStatedbDumpParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get statedb dump params
+func (o *GetStatedbDumpParams) WithTimeout(timeout time.Duration) *GetStatedbDumpParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get statedb dump params
+func (o *GetStatedbDumpParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get statedb dump params
+func (o *GetStatedbDumpParams) WithContext(ctx context.Context) *GetStatedbDumpParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get statedb dump params
+func (o *GetStatedbDumpParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get statedb dump params
+func (o *GetStatedbDumpParams) WithHTTPClient(client *http.Client) *GetStatedbDumpParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get statedb dump params
+func (o *GetStatedbDumpParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetStatedbDumpParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/statedb/get_statedb_dump_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/statedb/get_statedb_dump_responses.go
new file mode 100644
index 000000000..3577f9c3b
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/statedb/get_statedb_dump_responses.go
@@ -0,0 +1,101 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package statedb
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+)
+
+// GetStatedbDumpReader is a Reader for the GetStatedbDump structure.
+type GetStatedbDumpReader struct {
+ formats strfmt.Registry
+ writer io.Writer
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetStatedbDumpReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetStatedbDumpOK(o.writer)
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetStatedbDumpOK creates a GetStatedbDumpOK with default headers values
+func NewGetStatedbDumpOK(writer io.Writer) *GetStatedbDumpOK {
+ return &GetStatedbDumpOK{
+
+ Payload: writer,
+ }
+}
+
+/*
+GetStatedbDumpOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetStatedbDumpOK struct {
+ Payload io.Writer
+}
+
+// IsSuccess returns true when this get statedb dump o k response has a 2xx status code
+func (o *GetStatedbDumpOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get statedb dump o k response has a 3xx status code
+func (o *GetStatedbDumpOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get statedb dump o k response has a 4xx status code
+func (o *GetStatedbDumpOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get statedb dump o k response has a 5xx status code
+func (o *GetStatedbDumpOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get statedb dump o k response a status code equal to that given
+func (o *GetStatedbDumpOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetStatedbDumpOK) Error() string {
+ return fmt.Sprintf("[GET /statedb/dump][%d] getStatedbDumpOK %+v", 200, o.Payload)
+}
+
+func (o *GetStatedbDumpOK) String() string {
+ return fmt.Sprintf("[GET /statedb/dump][%d] getStatedbDumpOK %+v", 200, o.Payload)
+}
+
+func (o *GetStatedbDumpOK) GetPayload() io.Writer {
+ return o.Payload
+}
+
+func (o *GetStatedbDumpOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/statedb/statedb_client.go b/vendor/github.com/cilium/cilium/api/v1/client/statedb/statedb_client.go
new file mode 100644
index 000000000..69365c913
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/client/statedb/statedb_client.go
@@ -0,0 +1,83 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package statedb
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+)
+
+// New creates a new statedb API client.
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
+ return &Client{transport: transport, formats: formats}
+}
+
+/*
+Client for statedb API
+*/
+type Client struct {
+ transport runtime.ClientTransport
+ formats strfmt.Registry
+}
+
+// ClientOption is the option for Client methods
+type ClientOption func(*runtime.ClientOperation)
+
+// ClientService is the interface for Client methods
+type ClientService interface {
+ GetStatedbDump(params *GetStatedbDumpParams, writer io.Writer, opts ...ClientOption) (*GetStatedbDumpOK, error)
+
+ SetTransport(transport runtime.ClientTransport)
+}
+
+/*
+GetStatedbDump dumps state d b contents
+*/
+func (a *Client) GetStatedbDump(params *GetStatedbDumpParams, writer io.Writer, opts ...ClientOption) (*GetStatedbDumpOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetStatedbDumpParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetStatedbDump",
+ Method: "GET",
+ PathPattern: "/statedb/dump",
+ ProducesMediaTypes: []string{"application/octet-stream"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetStatedbDumpReader{formats: a.formats, writer: writer},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetStatedbDumpOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetStatedbDump: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+ a.transport = transport
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/flow/README.md b/vendor/github.com/cilium/cilium/api/v1/flow/README.md
index bf1bb53ef..bcdfaa0b5 100644
--- a/vendor/github.com/cilium/cilium/api/v1/flow/README.md
+++ b/vendor/github.com/cilium/cilium/api/v1/flow/README.md
@@ -27,6 +27,7 @@
- [Layer7](#flow-Layer7)
- [LostEvent](#flow-LostEvent)
- [NetworkInterface](#flow-NetworkInterface)
+ - [Policy](#flow-Policy)
- [PolicyUpdateNotification](#flow-PolicyUpdateNotification)
- [SCTP](#flow-SCTP)
- [Service](#flow-Service)
@@ -109,7 +110,7 @@
### CiliumEventType
-CiliumEventType from which the flow originated
+CiliumEventType from which the flow originated.
| Field | Type | Label | Description |
@@ -125,8 +126,7 @@ CiliumEventType from which the flow originated
### DNS
-DNS flow. This is basically directly mapped from Cilium's LogRecordDNS:
- https://github.com/cilium/cilium/blob/04f3889d627774f79e56d14ddbc165b3169e2d01/pkg/proxy/accesslog/record.go#L264
+DNS flow. This is basically directly mapped from Cilium's [LogRecordDNS](https://github.com/cilium/cilium/blob/04f3889d627774f79e56d14ddbc165b3169e2d01/pkg/proxy/accesslog/record.go#L264):
| Field | Type | Label | Description |
@@ -242,7 +242,7 @@ DNS flow. This is basically directly mapped from Cilium's LogRecordDNS:
### EventTypeFilter
-EventTypeFilter is a filter describing a particular event type
+EventTypeFilter is a filter describing a particular event type.
| Field | Type | Label | Description |
@@ -296,6 +296,9 @@ EventTypeFilter is a filter describing a particular event type
| socket_cookie | [uint64](#uint64) | | socket_cookie is the Linux kernel socket cookie for this flow. Only applicable to TraceSock notifications, zero for other types |
| cgroup_id | [uint64](#uint64) | | cgroup_id of the process which emitted this event. Only applicable to TraceSock notifications, zero for other types |
| Summary | [string](#string) | | **Deprecated.** This is a temporary workaround to support summary field for pb.Flow without duplicating logic from the old parser. This field will be removed once we fully migrate to the new parser. |
+| extensions | [google.protobuf.Any](#google-protobuf-Any) | | extensions can be used to add arbitrary additional metadata to flows. This can be used to extend functionality for other Hubble compatible APIs, or experiment with new functionality without needing to change the public API. |
+| egress_allowed_by | [Policy](#flow-Policy) | repeated | The CiliumNetworkPolicies allowing the egress of the flow. |
+| ingress_allowed_by | [Policy](#flow-Policy) | repeated | The CiliumNetworkPolicies allowing the ingress of the flow. |
@@ -337,6 +340,7 @@ multiple fields are set, then all fields must match for the filter to match.
| destination_identity | [uint32](#uint32) | repeated | destination_identity filters by the security identity of the destination endpoint. |
| http_method | [string](#string) | repeated | GET, POST, PUT, etc. methods. This type of field is well suited for an enum but every single existing place is using a string already. |
| http_path | [string](#string) | repeated | http_path is a list of regular expressions to filter on the HTTP path. |
+| http_url | [string](#string) | repeated | http_url is a list of regular expressions to filter on the HTTP URL. |
| tcp_flags | [TCPFlags](#flow-TCPFlags) | repeated | tcp_flags filters flows based on TCP header flags |
| node_name | [string](#string) | repeated | node_name is a list of patterns to filter on the node name, e.g. "k8s*", "test-cluster/*.domain.com", "cluster-name/" etc. |
| ip_version | [IPVersion](#flow-IPVersion) | repeated | filter based on IP version (ipv4 or ipv6) |
@@ -350,8 +354,7 @@ multiple fields are set, then all fields must match for the filter to match.
### HTTP
-L7 information for HTTP flows. It corresponds to Cilium's accesslog.LogRecordHTTP type.
- https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L206
+L7 information for HTTP flows. It corresponds to Cilium's [accesslog.LogRecordHTTP](https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L206) type.
| Field | Type | Label | Description |
@@ -458,8 +461,7 @@ L7 information for HTTP flows. It corresponds to Cilium's accesslog.LogRecor
### Kafka
-L7 information for Kafka flows. It corresponds to Cilium's accesslog.LogRecordKafka type.
- https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L229
+L7 information for Kafka flows. It corresponds to Cilium's [accesslog.LogRecordKafka](https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L229) type.
| Field | Type | Label | Description |
@@ -497,8 +499,7 @@ L7 information for Kafka flows. It corresponds to Cilium's accesslog.LogReco
### Layer7
-Message for L7 flow, which roughly corresponds to Cilium's accesslog LogRecord:
- https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L141
+Message for L7 flow, which roughly corresponds to Cilium's accesslog [LogRecord](https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L141):
| Field | Type | Label | Description |
@@ -548,6 +549,24 @@ that happened before the events were captured by Hubble.
+
+
+### Policy
+
+
+
+| Field | Type | Label | Description |
+| ----- | ---- | ----- | ----------- |
+| name | [string](#string) | | |
+| namespace | [string](#string) | | |
+| labels | [string](#string) | repeated | |
+| revision | [uint64](#uint64) | | |
+
+
+
+
+
+
### PolicyUpdateNotification
@@ -709,10 +728,9 @@ that happened before the events were captured by Hubble.
### TraceContext
-TraceContext contains trace context propagation data, ie information about a
+TraceContext contains trace context propagation data, i.e. information about a
distributed trace.
-For more information about trace context, check the W3C Trace Context
-specification: https://www.w3.org/TR/trace-context/
+For more information about trace context, check the [W3C Trace Context specification](https://www.w3.org/TR/trace-context/).
| Field | Type | Label | Description |
@@ -777,7 +795,7 @@ TraceParent identifies the incoming request in a tracing system.
### AgentEventType
AgentEventType is the type of agent event. These values are shared with type
-AgentNotification in pkg/monitor/api/types.go
+AgentNotification in pkg/monitor/api/types.go.
| Name | Number | Description |
| ---- | ------ | ----------- |
@@ -799,7 +817,7 @@ AgentNotification in pkg/monitor/api/types.go
### AuthType
-These types correspond to definitions in pkg/policy/l4.go
+These types correspond to definitions in pkg/policy/l4.go.
| Name | Number | Description |
| ---- | ------ | ----------- |
@@ -900,6 +918,8 @@ These values are shared with pkg/monitor/api/datapath_debug.go and bpf/lib/dbg.h
| DBG_SK_LOOKUP4 | 62 | |
| DBG_SK_LOOKUP6 | 63 | |
| DBG_SK_ASSIGN | 64 | |
+| DBG_L7_LB | 65 | |
+| DBG_SKIP_POLICY | 66 | |
@@ -977,6 +997,9 @@ here.
| INVALID_CLUSTER_ID | 192 | |
| UNSUPPORTED_PROTOCOL_FOR_DSR_ENCAP | 193 | |
| NO_EGRESS_GATEWAY | 194 | |
+| UNENCRYPTED_TRAFFIC | 195 | |
+| TTL_EXCEEDED | 196 | |
+| NO_NODE_ID | 197 | |
@@ -1023,8 +1046,7 @@ EventType are constants are based on the ones from <linux/perf_event.h>.
### L7FlowType
-This enum corresponds to Cilium's L7 accesslog FlowType:
- https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L26
+This enum corresponds to Cilium's L7 accesslog [FlowType](https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L26):
| Name | Number | Description |
| ---- | ------ | ----------- |
diff --git a/vendor/github.com/cilium/cilium/api/v1/flow/flow.pb.go b/vendor/github.com/cilium/cilium/api/v1/flow/flow.pb.go
index 816a98515..c91e31a56 100644
--- a/vendor/github.com/cilium/cilium/api/v1/flow/flow.pb.go
+++ b/vendor/github.com/cilium/cilium/api/v1/flow/flow.pb.go
@@ -3,8 +3,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.30.0
-// protoc v4.22.3
+// protoc-gen-go v1.31.0
+// protoc v4.24.0
// source: flow/flow.proto
package flow
@@ -12,6 +12,7 @@ package flow
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
reflect "reflect"
@@ -77,7 +78,7 @@ func (FlowType) EnumDescriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{0}
}
-// These types correspond to definitions in pkg/policy/l4.go
+// These types correspond to definitions in pkg/policy/l4.go.
type AuthType int32
const (
@@ -231,9 +232,7 @@ func (TraceObservationPoint) EnumDescriptor() ([]byte, []int) {
return file_flow_flow_proto_rawDescGZIP(), []int{2}
}
-// This enum corresponds to Cilium's L7 accesslog FlowType:
-//
-// https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L26
+// This enum corresponds to Cilium's L7 accesslog [FlowType](https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L26):
type L7FlowType int32
const (
@@ -486,6 +485,9 @@ const (
DropReason_INVALID_CLUSTER_ID DropReason = 192
DropReason_UNSUPPORTED_PROTOCOL_FOR_DSR_ENCAP DropReason = 193
DropReason_NO_EGRESS_GATEWAY DropReason = 194
+ DropReason_UNENCRYPTED_TRAFFIC DropReason = 195
+ DropReason_TTL_EXCEEDED DropReason = 196
+ DropReason_NO_NODE_ID DropReason = 197
)
// Enum value maps for DropReason.
@@ -556,6 +558,9 @@ var (
192: "INVALID_CLUSTER_ID",
193: "UNSUPPORTED_PROTOCOL_FOR_DSR_ENCAP",
194: "NO_EGRESS_GATEWAY",
+ 195: "UNENCRYPTED_TRAFFIC",
+ 196: "TTL_EXCEEDED",
+ 197: "NO_NODE_ID",
}
DropReason_value = map[string]int32{
"DROP_REASON_UNKNOWN": 0,
@@ -623,6 +628,9 @@ var (
"INVALID_CLUSTER_ID": 192,
"UNSUPPORTED_PROTOCOL_FOR_DSR_ENCAP": 193,
"NO_EGRESS_GATEWAY": 194,
+ "UNENCRYPTED_TRAFFIC": 195,
+ "TTL_EXCEEDED": 196,
+ "NO_NODE_ID": 197,
}
)
@@ -883,7 +891,7 @@ func (LostEventSource) EnumDescriptor() ([]byte, []int) {
}
// AgentEventType is the type of agent event. These values are shared with type
-// AgentNotification in pkg/monitor/api/types.go
+// AgentNotification in pkg/monitor/api/types.go.
type AgentEventType int32
const (
@@ -1085,6 +1093,8 @@ const (
DebugEventType_DBG_SK_LOOKUP4 DebugEventType = 62
DebugEventType_DBG_SK_LOOKUP6 DebugEventType = 63
DebugEventType_DBG_SK_ASSIGN DebugEventType = 64
+ DebugEventType_DBG_L7_LB DebugEventType = 65
+ DebugEventType_DBG_SKIP_POLICY DebugEventType = 66
)
// Enum value maps for DebugEventType.
@@ -1155,6 +1165,8 @@ var (
62: "DBG_SK_LOOKUP4",
63: "DBG_SK_LOOKUP6",
64: "DBG_SK_ASSIGN",
+ 65: "DBG_L7_LB",
+ 66: "DBG_SKIP_POLICY",
}
DebugEventType_value = map[string]int32{
"DBG_EVENT_UNKNOWN": 0,
@@ -1222,6 +1234,8 @@ var (
"DBG_SK_LOOKUP4": 62,
"DBG_SK_LOOKUP6": 63,
"DBG_SK_ASSIGN": 64,
+ "DBG_L7_LB": 65,
+ "DBG_SKIP_POLICY": 66,
}
)
@@ -1335,6 +1349,14 @@ type Flow struct {
//
// Deprecated: Marked as deprecated in flow/flow.proto.
Summary string `protobuf:"bytes,100000,opt,name=Summary,proto3" json:"Summary,omitempty"`
+ // extensions can be used to add arbitrary additional metadata to flows.
+ // This can be used to extend functionality for other Hubble compatible
+ // APIs, or experiment with new functionality without needing to change the public API.
+ Extensions *anypb.Any `protobuf:"bytes,150000,opt,name=extensions,proto3" json:"extensions,omitempty"`
+ // The CiliumNetworkPolicies allowing the egress of the flow.
+ EgressAllowedBy []*Policy `protobuf:"bytes,21001,rep,name=egress_allowed_by,json=egressAllowedBy,proto3" json:"egress_allowed_by,omitempty"`
+ // The CiliumNetworkPolicies allowing the ingress of the flow.
+ IngressAllowedBy []*Policy `protobuf:"bytes,21002,rep,name=ingress_allowed_by,json=ingressAllowedBy,proto3" json:"ingress_allowed_by,omitempty"`
}
func (x *Flow) Reset() {
@@ -1596,6 +1618,27 @@ func (x *Flow) GetSummary() string {
return ""
}
+func (x *Flow) GetExtensions() *anypb.Any {
+ if x != nil {
+ return x.Extensions
+ }
+ return nil
+}
+
+func (x *Flow) GetEgressAllowedBy() []*Policy {
+ if x != nil {
+ return x.EgressAllowedBy
+ }
+ return nil
+}
+
+func (x *Flow) GetIngressAllowedBy() []*Policy {
+ if x != nil {
+ return x.IngressAllowedBy
+ }
+ return nil
+}
+
type Layer4 struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -1720,9 +1763,7 @@ func (*Layer4_ICMPv6) isLayer4_Protocol() {}
func (*Layer4_SCTP) isLayer4_Protocol() {}
-// Message for L7 flow, which roughly corresponds to Cilium's accesslog LogRecord:
-//
-// https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L141
+// Message for L7 flow, which roughly corresponds to Cilium's accesslog [LogRecord](https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L141):
type Layer7 struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -1837,10 +1878,9 @@ func (*Layer7_Http) isLayer7_Record() {}
func (*Layer7_Kafka) isLayer7_Record() {}
-// TraceContext contains trace context propagation data, ie information about a
+// TraceContext contains trace context propagation data, i.e. information about a
// distributed trace.
-// For more information about trace context, check the W3C Trace Context
-// specification: https://www.w3.org/TR/trace-context/
+// For more information about trace context, check the [W3C Trace Context specification](https://www.w3.org/TR/trace-context/).
type TraceContext struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -2604,7 +2644,78 @@ func (x *ICMPv6) GetCode() uint32 {
return 0
}
-// EventTypeFilter is a filter describing a particular event type
+type Policy struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
+ Labels []string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty"`
+ Revision uint64 `protobuf:"varint,4,opt,name=revision,proto3" json:"revision,omitempty"`
+}
+
+func (x *Policy) Reset() {
+ *x = Policy{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_flow_flow_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Policy) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Policy) ProtoMessage() {}
+
+func (x *Policy) ProtoReflect() protoreflect.Message {
+ mi := &file_flow_flow_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Policy.ProtoReflect.Descriptor instead.
+func (*Policy) Descriptor() ([]byte, []int) {
+ return file_flow_flow_proto_rawDescGZIP(), []int{15}
+}
+
+func (x *Policy) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Policy) GetNamespace() string {
+ if x != nil {
+ return x.Namespace
+ }
+ return ""
+}
+
+func (x *Policy) GetLabels() []string {
+ if x != nil {
+ return x.Labels
+ }
+ return nil
+}
+
+func (x *Policy) GetRevision() uint64 {
+ if x != nil {
+ return x.Revision
+ }
+ return 0
+}
+
+// EventTypeFilter is a filter describing a particular event type.
type EventTypeFilter struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -2624,7 +2735,7 @@ type EventTypeFilter struct {
func (x *EventTypeFilter) Reset() {
*x = EventTypeFilter{}
if protoimpl.UnsafeEnabled {
- mi := &file_flow_flow_proto_msgTypes[15]
+ mi := &file_flow_flow_proto_msgTypes[16]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2637,7 +2748,7 @@ func (x *EventTypeFilter) String() string {
func (*EventTypeFilter) ProtoMessage() {}
func (x *EventTypeFilter) ProtoReflect() protoreflect.Message {
- mi := &file_flow_flow_proto_msgTypes[15]
+ mi := &file_flow_flow_proto_msgTypes[16]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2650,7 +2761,7 @@ func (x *EventTypeFilter) ProtoReflect() protoreflect.Message {
// Deprecated: Use EventTypeFilter.ProtoReflect.Descriptor instead.
func (*EventTypeFilter) Descriptor() ([]byte, []int) {
- return file_flow_flow_proto_rawDescGZIP(), []int{15}
+ return file_flow_flow_proto_rawDescGZIP(), []int{16}
}
func (x *EventTypeFilter) GetType() int32 {
@@ -2674,7 +2785,7 @@ func (x *EventTypeFilter) GetSubType() int32 {
return 0
}
-// CiliumEventType from which the flow originated
+// CiliumEventType from which the flow originated.
type CiliumEventType struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -2693,7 +2804,7 @@ type CiliumEventType struct {
func (x *CiliumEventType) Reset() {
*x = CiliumEventType{}
if protoimpl.UnsafeEnabled {
- mi := &file_flow_flow_proto_msgTypes[16]
+ mi := &file_flow_flow_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2706,7 +2817,7 @@ func (x *CiliumEventType) String() string {
func (*CiliumEventType) ProtoMessage() {}
func (x *CiliumEventType) ProtoReflect() protoreflect.Message {
- mi := &file_flow_flow_proto_msgTypes[16]
+ mi := &file_flow_flow_proto_msgTypes[17]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2719,7 +2830,7 @@ func (x *CiliumEventType) ProtoReflect() protoreflect.Message {
// Deprecated: Use CiliumEventType.ProtoReflect.Descriptor instead.
func (*CiliumEventType) Descriptor() ([]byte, []int) {
- return file_flow_flow_proto_rawDescGZIP(), []int{16}
+ return file_flow_flow_proto_rawDescGZIP(), []int{17}
}
func (x *CiliumEventType) GetType() int32 {
@@ -2807,6 +2918,8 @@ type FlowFilter struct {
HttpMethod []string `protobuf:"bytes,21,rep,name=http_method,json=httpMethod,proto3" json:"http_method,omitempty"`
// http_path is a list of regular expressions to filter on the HTTP path.
HttpPath []string `protobuf:"bytes,22,rep,name=http_path,json=httpPath,proto3" json:"http_path,omitempty"`
+ // http_url is a list of regular expressions to filter on the HTTP URL.
+ HttpUrl []string `protobuf:"bytes,31,rep,name=http_url,json=httpUrl,proto3" json:"http_url,omitempty"`
// tcp_flags filters flows based on TCP header flags
TcpFlags []*TCPFlags `protobuf:"bytes,23,rep,name=tcp_flags,json=tcpFlags,proto3" json:"tcp_flags,omitempty"`
// node_name is a list of patterns to filter on the node name, e.g. "k8s*",
@@ -2821,7 +2934,7 @@ type FlowFilter struct {
func (x *FlowFilter) Reset() {
*x = FlowFilter{}
if protoimpl.UnsafeEnabled {
- mi := &file_flow_flow_proto_msgTypes[17]
+ mi := &file_flow_flow_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -2834,7 +2947,7 @@ func (x *FlowFilter) String() string {
func (*FlowFilter) ProtoMessage() {}
func (x *FlowFilter) ProtoReflect() protoreflect.Message {
- mi := &file_flow_flow_proto_msgTypes[17]
+ mi := &file_flow_flow_proto_msgTypes[18]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -2847,7 +2960,7 @@ func (x *FlowFilter) ProtoReflect() protoreflect.Message {
// Deprecated: Use FlowFilter.ProtoReflect.Descriptor instead.
func (*FlowFilter) Descriptor() ([]byte, []int) {
- return file_flow_flow_proto_rawDescGZIP(), []int{17}
+ return file_flow_flow_proto_rawDescGZIP(), []int{18}
}
func (x *FlowFilter) GetUuid() []string {
@@ -3032,6 +3145,13 @@ func (x *FlowFilter) GetHttpPath() []string {
return nil
}
+func (x *FlowFilter) GetHttpUrl() []string {
+ if x != nil {
+ return x.HttpUrl
+ }
+ return nil
+}
+
func (x *FlowFilter) GetTcpFlags() []*TCPFlags {
if x != nil {
return x.TcpFlags
@@ -3060,9 +3180,7 @@ func (x *FlowFilter) GetTraceId() []string {
return nil
}
-// DNS flow. This is basically directly mapped from Cilium's LogRecordDNS:
-//
-// https://github.com/cilium/cilium/blob/04f3889d627774f79e56d14ddbc165b3169e2d01/pkg/proxy/accesslog/record.go#L264
+// DNS flow. This is basically directly mapped from Cilium's [LogRecordDNS](https://github.com/cilium/cilium/blob/04f3889d627774f79e56d14ddbc165b3169e2d01/pkg/proxy/accesslog/record.go#L264):
type DNS struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -3096,7 +3214,7 @@ type DNS struct {
func (x *DNS) Reset() {
*x = DNS{}
if protoimpl.UnsafeEnabled {
- mi := &file_flow_flow_proto_msgTypes[18]
+ mi := &file_flow_flow_proto_msgTypes[19]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3109,7 +3227,7 @@ func (x *DNS) String() string {
func (*DNS) ProtoMessage() {}
func (x *DNS) ProtoReflect() protoreflect.Message {
- mi := &file_flow_flow_proto_msgTypes[18]
+ mi := &file_flow_flow_proto_msgTypes[19]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3122,7 +3240,7 @@ func (x *DNS) ProtoReflect() protoreflect.Message {
// Deprecated: Use DNS.ProtoReflect.Descriptor instead.
func (*DNS) Descriptor() ([]byte, []int) {
- return file_flow_flow_proto_rawDescGZIP(), []int{18}
+ return file_flow_flow_proto_rawDescGZIP(), []int{19}
}
func (x *DNS) GetQuery() string {
@@ -3193,7 +3311,7 @@ type HTTPHeader struct {
func (x *HTTPHeader) Reset() {
*x = HTTPHeader{}
if protoimpl.UnsafeEnabled {
- mi := &file_flow_flow_proto_msgTypes[19]
+ mi := &file_flow_flow_proto_msgTypes[20]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3206,7 +3324,7 @@ func (x *HTTPHeader) String() string {
func (*HTTPHeader) ProtoMessage() {}
func (x *HTTPHeader) ProtoReflect() protoreflect.Message {
- mi := &file_flow_flow_proto_msgTypes[19]
+ mi := &file_flow_flow_proto_msgTypes[20]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3219,7 +3337,7 @@ func (x *HTTPHeader) ProtoReflect() protoreflect.Message {
// Deprecated: Use HTTPHeader.ProtoReflect.Descriptor instead.
func (*HTTPHeader) Descriptor() ([]byte, []int) {
- return file_flow_flow_proto_rawDescGZIP(), []int{19}
+ return file_flow_flow_proto_rawDescGZIP(), []int{20}
}
func (x *HTTPHeader) GetKey() string {
@@ -3236,9 +3354,7 @@ func (x *HTTPHeader) GetValue() string {
return ""
}
-// L7 information for HTTP flows. It corresponds to Cilium's accesslog.LogRecordHTTP type.
-//
-// https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L206
+// L7 information for HTTP flows. It corresponds to Cilium's [accesslog.LogRecordHTTP](https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L206) type.
type HTTP struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -3254,7 +3370,7 @@ type HTTP struct {
func (x *HTTP) Reset() {
*x = HTTP{}
if protoimpl.UnsafeEnabled {
- mi := &file_flow_flow_proto_msgTypes[20]
+ mi := &file_flow_flow_proto_msgTypes[21]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3267,7 +3383,7 @@ func (x *HTTP) String() string {
func (*HTTP) ProtoMessage() {}
func (x *HTTP) ProtoReflect() protoreflect.Message {
- mi := &file_flow_flow_proto_msgTypes[20]
+ mi := &file_flow_flow_proto_msgTypes[21]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3280,7 +3396,7 @@ func (x *HTTP) ProtoReflect() protoreflect.Message {
// Deprecated: Use HTTP.ProtoReflect.Descriptor instead.
func (*HTTP) Descriptor() ([]byte, []int) {
- return file_flow_flow_proto_rawDescGZIP(), []int{20}
+ return file_flow_flow_proto_rawDescGZIP(), []int{21}
}
func (x *HTTP) GetCode() uint32 {
@@ -3318,9 +3434,7 @@ func (x *HTTP) GetHeaders() []*HTTPHeader {
return nil
}
-// L7 information for Kafka flows. It corresponds to Cilium's accesslog.LogRecordKafka type.
-//
-// https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L229
+// L7 information for Kafka flows. It corresponds to Cilium's [accesslog.LogRecordKafka](https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L229) type.
type Kafka struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -3336,7 +3450,7 @@ type Kafka struct {
func (x *Kafka) Reset() {
*x = Kafka{}
if protoimpl.UnsafeEnabled {
- mi := &file_flow_flow_proto_msgTypes[21]
+ mi := &file_flow_flow_proto_msgTypes[22]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3349,7 +3463,7 @@ func (x *Kafka) String() string {
func (*Kafka) ProtoMessage() {}
func (x *Kafka) ProtoReflect() protoreflect.Message {
- mi := &file_flow_flow_proto_msgTypes[21]
+ mi := &file_flow_flow_proto_msgTypes[22]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3362,7 +3476,7 @@ func (x *Kafka) ProtoReflect() protoreflect.Message {
// Deprecated: Use Kafka.ProtoReflect.Descriptor instead.
func (*Kafka) Descriptor() ([]byte, []int) {
- return file_flow_flow_proto_rawDescGZIP(), []int{21}
+ return file_flow_flow_proto_rawDescGZIP(), []int{22}
}
func (x *Kafka) GetErrorCode() int32 {
@@ -3412,7 +3526,7 @@ type Service struct {
func (x *Service) Reset() {
*x = Service{}
if protoimpl.UnsafeEnabled {
- mi := &file_flow_flow_proto_msgTypes[22]
+ mi := &file_flow_flow_proto_msgTypes[23]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3425,7 +3539,7 @@ func (x *Service) String() string {
func (*Service) ProtoMessage() {}
func (x *Service) ProtoReflect() protoreflect.Message {
- mi := &file_flow_flow_proto_msgTypes[22]
+ mi := &file_flow_flow_proto_msgTypes[23]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3438,7 +3552,7 @@ func (x *Service) ProtoReflect() protoreflect.Message {
// Deprecated: Use Service.ProtoReflect.Descriptor instead.
func (*Service) Descriptor() ([]byte, []int) {
- return file_flow_flow_proto_rawDescGZIP(), []int{22}
+ return file_flow_flow_proto_rawDescGZIP(), []int{23}
}
func (x *Service) GetName() string {
@@ -3474,7 +3588,7 @@ type LostEvent struct {
func (x *LostEvent) Reset() {
*x = LostEvent{}
if protoimpl.UnsafeEnabled {
- mi := &file_flow_flow_proto_msgTypes[23]
+ mi := &file_flow_flow_proto_msgTypes[24]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3487,7 +3601,7 @@ func (x *LostEvent) String() string {
func (*LostEvent) ProtoMessage() {}
func (x *LostEvent) ProtoReflect() protoreflect.Message {
- mi := &file_flow_flow_proto_msgTypes[23]
+ mi := &file_flow_flow_proto_msgTypes[24]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3500,7 +3614,7 @@ func (x *LostEvent) ProtoReflect() protoreflect.Message {
// Deprecated: Use LostEvent.ProtoReflect.Descriptor instead.
func (*LostEvent) Descriptor() ([]byte, []int) {
- return file_flow_flow_proto_rawDescGZIP(), []int{23}
+ return file_flow_flow_proto_rawDescGZIP(), []int{24}
}
func (x *LostEvent) GetSource() LostEventSource {
@@ -3546,7 +3660,7 @@ type AgentEvent struct {
func (x *AgentEvent) Reset() {
*x = AgentEvent{}
if protoimpl.UnsafeEnabled {
- mi := &file_flow_flow_proto_msgTypes[24]
+ mi := &file_flow_flow_proto_msgTypes[25]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3559,7 +3673,7 @@ func (x *AgentEvent) String() string {
func (*AgentEvent) ProtoMessage() {}
func (x *AgentEvent) ProtoReflect() protoreflect.Message {
- mi := &file_flow_flow_proto_msgTypes[24]
+ mi := &file_flow_flow_proto_msgTypes[25]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3572,7 +3686,7 @@ func (x *AgentEvent) ProtoReflect() protoreflect.Message {
// Deprecated: Use AgentEvent.ProtoReflect.Descriptor instead.
func (*AgentEvent) Descriptor() ([]byte, []int) {
- return file_flow_flow_proto_rawDescGZIP(), []int{24}
+ return file_flow_flow_proto_rawDescGZIP(), []int{25}
}
func (x *AgentEvent) GetType() AgentEventType {
@@ -3713,7 +3827,7 @@ type AgentEventUnknown struct {
func (x *AgentEventUnknown) Reset() {
*x = AgentEventUnknown{}
if protoimpl.UnsafeEnabled {
- mi := &file_flow_flow_proto_msgTypes[25]
+ mi := &file_flow_flow_proto_msgTypes[26]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3726,7 +3840,7 @@ func (x *AgentEventUnknown) String() string {
func (*AgentEventUnknown) ProtoMessage() {}
func (x *AgentEventUnknown) ProtoReflect() protoreflect.Message {
- mi := &file_flow_flow_proto_msgTypes[25]
+ mi := &file_flow_flow_proto_msgTypes[26]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3739,7 +3853,7 @@ func (x *AgentEventUnknown) ProtoReflect() protoreflect.Message {
// Deprecated: Use AgentEventUnknown.ProtoReflect.Descriptor instead.
func (*AgentEventUnknown) Descriptor() ([]byte, []int) {
- return file_flow_flow_proto_rawDescGZIP(), []int{25}
+ return file_flow_flow_proto_rawDescGZIP(), []int{26}
}
func (x *AgentEventUnknown) GetType() string {
@@ -3767,7 +3881,7 @@ type TimeNotification struct {
func (x *TimeNotification) Reset() {
*x = TimeNotification{}
if protoimpl.UnsafeEnabled {
- mi := &file_flow_flow_proto_msgTypes[26]
+ mi := &file_flow_flow_proto_msgTypes[27]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3780,7 +3894,7 @@ func (x *TimeNotification) String() string {
func (*TimeNotification) ProtoMessage() {}
func (x *TimeNotification) ProtoReflect() protoreflect.Message {
- mi := &file_flow_flow_proto_msgTypes[26]
+ mi := &file_flow_flow_proto_msgTypes[27]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3793,7 +3907,7 @@ func (x *TimeNotification) ProtoReflect() protoreflect.Message {
// Deprecated: Use TimeNotification.ProtoReflect.Descriptor instead.
func (*TimeNotification) Descriptor() ([]byte, []int) {
- return file_flow_flow_proto_rawDescGZIP(), []int{26}
+ return file_flow_flow_proto_rawDescGZIP(), []int{27}
}
func (x *TimeNotification) GetTime() *timestamppb.Timestamp {
@@ -3816,7 +3930,7 @@ type PolicyUpdateNotification struct {
func (x *PolicyUpdateNotification) Reset() {
*x = PolicyUpdateNotification{}
if protoimpl.UnsafeEnabled {
- mi := &file_flow_flow_proto_msgTypes[27]
+ mi := &file_flow_flow_proto_msgTypes[28]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3829,7 +3943,7 @@ func (x *PolicyUpdateNotification) String() string {
func (*PolicyUpdateNotification) ProtoMessage() {}
func (x *PolicyUpdateNotification) ProtoReflect() protoreflect.Message {
- mi := &file_flow_flow_proto_msgTypes[27]
+ mi := &file_flow_flow_proto_msgTypes[28]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3842,7 +3956,7 @@ func (x *PolicyUpdateNotification) ProtoReflect() protoreflect.Message {
// Deprecated: Use PolicyUpdateNotification.ProtoReflect.Descriptor instead.
func (*PolicyUpdateNotification) Descriptor() ([]byte, []int) {
- return file_flow_flow_proto_rawDescGZIP(), []int{27}
+ return file_flow_flow_proto_rawDescGZIP(), []int{28}
}
func (x *PolicyUpdateNotification) GetLabels() []string {
@@ -3879,7 +3993,7 @@ type EndpointRegenNotification struct {
func (x *EndpointRegenNotification) Reset() {
*x = EndpointRegenNotification{}
if protoimpl.UnsafeEnabled {
- mi := &file_flow_flow_proto_msgTypes[28]
+ mi := &file_flow_flow_proto_msgTypes[29]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3892,7 +4006,7 @@ func (x *EndpointRegenNotification) String() string {
func (*EndpointRegenNotification) ProtoMessage() {}
func (x *EndpointRegenNotification) ProtoReflect() protoreflect.Message {
- mi := &file_flow_flow_proto_msgTypes[28]
+ mi := &file_flow_flow_proto_msgTypes[29]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3905,7 +4019,7 @@ func (x *EndpointRegenNotification) ProtoReflect() protoreflect.Message {
// Deprecated: Use EndpointRegenNotification.ProtoReflect.Descriptor instead.
func (*EndpointRegenNotification) Descriptor() ([]byte, []int) {
- return file_flow_flow_proto_rawDescGZIP(), []int{28}
+ return file_flow_flow_proto_rawDescGZIP(), []int{29}
}
func (x *EndpointRegenNotification) GetId() uint64 {
@@ -3944,7 +4058,7 @@ type EndpointUpdateNotification struct {
func (x *EndpointUpdateNotification) Reset() {
*x = EndpointUpdateNotification{}
if protoimpl.UnsafeEnabled {
- mi := &file_flow_flow_proto_msgTypes[29]
+ mi := &file_flow_flow_proto_msgTypes[30]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -3957,7 +4071,7 @@ func (x *EndpointUpdateNotification) String() string {
func (*EndpointUpdateNotification) ProtoMessage() {}
func (x *EndpointUpdateNotification) ProtoReflect() protoreflect.Message {
- mi := &file_flow_flow_proto_msgTypes[29]
+ mi := &file_flow_flow_proto_msgTypes[30]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -3970,7 +4084,7 @@ func (x *EndpointUpdateNotification) ProtoReflect() protoreflect.Message {
// Deprecated: Use EndpointUpdateNotification.ProtoReflect.Descriptor instead.
func (*EndpointUpdateNotification) Descriptor() ([]byte, []int) {
- return file_flow_flow_proto_rawDescGZIP(), []int{29}
+ return file_flow_flow_proto_rawDescGZIP(), []int{30}
}
func (x *EndpointUpdateNotification) GetId() uint64 {
@@ -4026,7 +4140,7 @@ type IPCacheNotification struct {
func (x *IPCacheNotification) Reset() {
*x = IPCacheNotification{}
if protoimpl.UnsafeEnabled {
- mi := &file_flow_flow_proto_msgTypes[30]
+ mi := &file_flow_flow_proto_msgTypes[31]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4039,7 +4153,7 @@ func (x *IPCacheNotification) String() string {
func (*IPCacheNotification) ProtoMessage() {}
func (x *IPCacheNotification) ProtoReflect() protoreflect.Message {
- mi := &file_flow_flow_proto_msgTypes[30]
+ mi := &file_flow_flow_proto_msgTypes[31]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4052,7 +4166,7 @@ func (x *IPCacheNotification) ProtoReflect() protoreflect.Message {
// Deprecated: Use IPCacheNotification.ProtoReflect.Descriptor instead.
func (*IPCacheNotification) Descriptor() ([]byte, []int) {
- return file_flow_flow_proto_rawDescGZIP(), []int{30}
+ return file_flow_flow_proto_rawDescGZIP(), []int{31}
}
func (x *IPCacheNotification) GetCidr() string {
@@ -4123,7 +4237,7 @@ type ServiceUpsertNotificationAddr struct {
func (x *ServiceUpsertNotificationAddr) Reset() {
*x = ServiceUpsertNotificationAddr{}
if protoimpl.UnsafeEnabled {
- mi := &file_flow_flow_proto_msgTypes[31]
+ mi := &file_flow_flow_proto_msgTypes[32]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4136,7 +4250,7 @@ func (x *ServiceUpsertNotificationAddr) String() string {
func (*ServiceUpsertNotificationAddr) ProtoMessage() {}
func (x *ServiceUpsertNotificationAddr) ProtoReflect() protoreflect.Message {
- mi := &file_flow_flow_proto_msgTypes[31]
+ mi := &file_flow_flow_proto_msgTypes[32]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4149,7 +4263,7 @@ func (x *ServiceUpsertNotificationAddr) ProtoReflect() protoreflect.Message {
// Deprecated: Use ServiceUpsertNotificationAddr.ProtoReflect.Descriptor instead.
func (*ServiceUpsertNotificationAddr) Descriptor() ([]byte, []int) {
- return file_flow_flow_proto_rawDescGZIP(), []int{31}
+ return file_flow_flow_proto_rawDescGZIP(), []int{32}
}
func (x *ServiceUpsertNotificationAddr) GetIp() string {
@@ -4186,7 +4300,7 @@ type ServiceUpsertNotification struct {
func (x *ServiceUpsertNotification) Reset() {
*x = ServiceUpsertNotification{}
if protoimpl.UnsafeEnabled {
- mi := &file_flow_flow_proto_msgTypes[32]
+ mi := &file_flow_flow_proto_msgTypes[33]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4199,7 +4313,7 @@ func (x *ServiceUpsertNotification) String() string {
func (*ServiceUpsertNotification) ProtoMessage() {}
func (x *ServiceUpsertNotification) ProtoReflect() protoreflect.Message {
- mi := &file_flow_flow_proto_msgTypes[32]
+ mi := &file_flow_flow_proto_msgTypes[33]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4212,7 +4326,7 @@ func (x *ServiceUpsertNotification) ProtoReflect() protoreflect.Message {
// Deprecated: Use ServiceUpsertNotification.ProtoReflect.Descriptor instead.
func (*ServiceUpsertNotification) Descriptor() ([]byte, []int) {
- return file_flow_flow_proto_rawDescGZIP(), []int{32}
+ return file_flow_flow_proto_rawDescGZIP(), []int{33}
}
func (x *ServiceUpsertNotification) GetId() uint32 {
@@ -4290,7 +4404,7 @@ type ServiceDeleteNotification struct {
func (x *ServiceDeleteNotification) Reset() {
*x = ServiceDeleteNotification{}
if protoimpl.UnsafeEnabled {
- mi := &file_flow_flow_proto_msgTypes[33]
+ mi := &file_flow_flow_proto_msgTypes[34]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4303,7 +4417,7 @@ func (x *ServiceDeleteNotification) String() string {
func (*ServiceDeleteNotification) ProtoMessage() {}
func (x *ServiceDeleteNotification) ProtoReflect() protoreflect.Message {
- mi := &file_flow_flow_proto_msgTypes[33]
+ mi := &file_flow_flow_proto_msgTypes[34]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4316,7 +4430,7 @@ func (x *ServiceDeleteNotification) ProtoReflect() protoreflect.Message {
// Deprecated: Use ServiceDeleteNotification.ProtoReflect.Descriptor instead.
func (*ServiceDeleteNotification) Descriptor() ([]byte, []int) {
- return file_flow_flow_proto_rawDescGZIP(), []int{33}
+ return file_flow_flow_proto_rawDescGZIP(), []int{34}
}
func (x *ServiceDeleteNotification) GetId() uint32 {
@@ -4338,7 +4452,7 @@ type NetworkInterface struct {
func (x *NetworkInterface) Reset() {
*x = NetworkInterface{}
if protoimpl.UnsafeEnabled {
- mi := &file_flow_flow_proto_msgTypes[34]
+ mi := &file_flow_flow_proto_msgTypes[35]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4351,7 +4465,7 @@ func (x *NetworkInterface) String() string {
func (*NetworkInterface) ProtoMessage() {}
func (x *NetworkInterface) ProtoReflect() protoreflect.Message {
- mi := &file_flow_flow_proto_msgTypes[34]
+ mi := &file_flow_flow_proto_msgTypes[35]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4364,7 +4478,7 @@ func (x *NetworkInterface) ProtoReflect() protoreflect.Message {
// Deprecated: Use NetworkInterface.ProtoReflect.Descriptor instead.
func (*NetworkInterface) Descriptor() ([]byte, []int) {
- return file_flow_flow_proto_rawDescGZIP(), []int{34}
+ return file_flow_flow_proto_rawDescGZIP(), []int{35}
}
func (x *NetworkInterface) GetIndex() uint32 {
@@ -4399,7 +4513,7 @@ type DebugEvent struct {
func (x *DebugEvent) Reset() {
*x = DebugEvent{}
if protoimpl.UnsafeEnabled {
- mi := &file_flow_flow_proto_msgTypes[35]
+ mi := &file_flow_flow_proto_msgTypes[36]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -4412,7 +4526,7 @@ func (x *DebugEvent) String() string {
func (*DebugEvent) ProtoMessage() {}
func (x *DebugEvent) ProtoReflect() protoreflect.Message {
- mi := &file_flow_flow_proto_msgTypes[35]
+ mi := &file_flow_flow_proto_msgTypes[36]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -4425,7 +4539,7 @@ func (x *DebugEvent) ProtoReflect() protoreflect.Message {
// Deprecated: Use DebugEvent.ProtoReflect.Descriptor instead.
func (*DebugEvent) Descriptor() ([]byte, []int) {
- return file_flow_flow_proto_rawDescGZIP(), []int{35}
+ return file_flow_flow_proto_rawDescGZIP(), []int{36}
}
func (x *DebugEvent) GetType() DebugEventType {
@@ -4488,813 +4602,840 @@ var File_flow_flow_proto protoreflect.FileDescriptor
var file_flow_flow_proto_rawDesc = []byte{
0x0a, 0x0f, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x12, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72,
- 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
- 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xca, 0x0b, 0x0a, 0x04, 0x46, 0x6c, 0x6f,
- 0x77, 0x12, 0x2e, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x74, 0x69, 0x6d,
- 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x22, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, 0x27, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x64, 0x69, 0x63, 0x74,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x56, 0x65,
- 0x72, 0x64, 0x69, 0x63, 0x74, 0x52, 0x07, 0x76, 0x65, 0x72, 0x64, 0x69, 0x63, 0x74, 0x12, 0x23,
- 0x0a, 0x0b, 0x64, 0x72, 0x6f, 0x70, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x0d, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x64, 0x72, 0x6f, 0x70, 0x52, 0x65, 0x61,
- 0x73, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x74, 0x79, 0x70, 0x65,
- 0x18, 0x23, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x41, 0x75,
- 0x74, 0x68, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x61, 0x75, 0x74, 0x68, 0x54, 0x79, 0x70, 0x65,
- 0x12, 0x2a, 0x0a, 0x08, 0x65, 0x74, 0x68, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x74, 0x68, 0x65, 0x72, 0x6e,
- 0x65, 0x74, 0x52, 0x08, 0x65, 0x74, 0x68, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x12, 0x18, 0x0a, 0x02,
- 0x49, 0x50, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e,
- 0x49, 0x50, 0x52, 0x02, 0x49, 0x50, 0x12, 0x1c, 0x0a, 0x02, 0x6c, 0x34, 0x18, 0x06, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x34,
- 0x52, 0x02, 0x6c, 0x34, 0x12, 0x26, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x08,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x6e, 0x64, 0x70,
- 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x30, 0x0a, 0x0b,
- 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e,
- 0x74, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22,
- 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x66,
- 0x6c, 0x6f, 0x77, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x54, 0x79,
- 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18,
- 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12,
- 0x21, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18,
- 0x0d, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d,
- 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x64,
- 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12,
- 0x1c, 0x0a, 0x02, 0x6c, 0x37, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x66, 0x6c,
- 0x6f, 0x77, 0x2e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x37, 0x52, 0x02, 0x6c, 0x37, 0x12, 0x18, 0x0a,
- 0x05, 0x72, 0x65, 0x70, 0x6c, 0x79, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01,
- 0x52, 0x05, 0x72, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74,
- 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x66, 0x6c,
- 0x6f, 0x77, 0x2e, 0x43, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79,
- 0x70, 0x65, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a,
- 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18,
- 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x65, 0x72,
- 0x76, 0x69, 0x63, 0x65, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76,
- 0x69, 0x63, 0x65, 0x12, 0x3e, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b,
+ 0x6f, 0x12, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x22, 0xfc, 0x0c, 0x0a, 0x04, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x2e, 0x0a, 0x04,
+ 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
+ 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04,
+ 0x75, 0x75, 0x69, 0x64, 0x18, 0x22, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64,
+ 0x12, 0x27, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x64, 0x69, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0e, 0x32, 0x0d, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x56, 0x65, 0x72, 0x64, 0x69, 0x63, 0x74,
+ 0x52, 0x07, 0x76, 0x65, 0x72, 0x64, 0x69, 0x63, 0x74, 0x12, 0x23, 0x0a, 0x0b, 0x64, 0x72, 0x6f,
+ 0x70, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x02,
+ 0x18, 0x01, 0x52, 0x0a, 0x64, 0x72, 0x6f, 0x70, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x2b,
+ 0x0a, 0x09, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x23, 0x20, 0x01, 0x28,
+ 0x0e, 0x32, 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x54, 0x79, 0x70,
+ 0x65, 0x52, 0x08, 0x61, 0x75, 0x74, 0x68, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2a, 0x0a, 0x08, 0x65,
+ 0x74, 0x68, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e,
+ 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x74, 0x68, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x52, 0x08, 0x65,
+ 0x74, 0x68, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x12, 0x18, 0x0a, 0x02, 0x49, 0x50, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x50, 0x52, 0x02, 0x49,
+ 0x50, 0x12, 0x1c, 0x0a, 0x02, 0x6c, 0x34, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e,
+ 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x34, 0x52, 0x02, 0x6c, 0x34, 0x12,
+ 0x26, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52,
+ 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x30, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69,
+ 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x66,
+ 0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0b, 0x64, 0x65,
+ 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x04, 0x54, 0x79, 0x70,
+ 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x46,
+ 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a,
+ 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x09,
+ 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x2b, 0x0a,
+ 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d,
+ 0x65, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x1c, 0x0a, 0x02, 0x6c, 0x37,
+ 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x4c, 0x61,
+ 0x79, 0x65, 0x72, 0x37, 0x52, 0x02, 0x6c, 0x37, 0x12, 0x18, 0x0a, 0x05, 0x72, 0x65, 0x70, 0x6c,
+ 0x79, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x05, 0x72, 0x65, 0x70,
+ 0x6c, 0x79, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65,
+ 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x43, 0x69,
+ 0x6c, 0x69, 0x75, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x65,
+ 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x0d, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52,
- 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76,
- 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x64,
- 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16,
- 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x44, 0x69, 0x72,
- 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x44,
- 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x11, 0x70, 0x6f, 0x6c, 0x69,
- 0x63, 0x79, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x17, 0x20,
- 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68,
- 0x54, 0x79, 0x70, 0x65, 0x12, 0x53, 0x0a, 0x17, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x6f, 0x62,
- 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18,
- 0x18, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x72, 0x61,
- 0x63, 0x65, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x69,
- 0x6e, 0x74, 0x52, 0x15, 0x74, 0x72, 0x61, 0x63, 0x65, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3a, 0x0a, 0x10, 0x64, 0x72, 0x6f,
- 0x70, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x18, 0x19, 0x20,
- 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x52,
- 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x52, 0x0e, 0x64, 0x72, 0x6f, 0x70, 0x52, 0x65, 0x61, 0x73, 0x6f,
- 0x6e, 0x44, 0x65, 0x73, 0x63, 0x12, 0x35, 0x0a, 0x08, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x6c,
- 0x79, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61,
- 0x6c, 0x75, 0x65, 0x52, 0x07, 0x69, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x47, 0x0a, 0x13,
- 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x63, 0x61, 0x70, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x70, 0x6f,
- 0x69, 0x6e, 0x74, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x66, 0x6c, 0x6f, 0x77,
- 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x43, 0x61, 0x70, 0x74, 0x75, 0x72, 0x65, 0x50, 0x6f, 0x69,
- 0x6e, 0x74, 0x52, 0x11, 0x64, 0x65, 0x62, 0x75, 0x67, 0x43, 0x61, 0x70, 0x74, 0x75, 0x72, 0x65,
- 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61,
- 0x63, 0x65, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e,
- 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65,
- 0x52, 0x09, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70,
- 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0d, 0x52,
- 0x09, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x37, 0x0a, 0x0d, 0x74, 0x72,
- 0x61, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x1e, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x12, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f,
- 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, 0x0c, 0x74, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74,
- 0x65, 0x78, 0x74, 0x12, 0x46, 0x0a, 0x10, 0x73, 0x6f, 0x63, 0x6b, 0x5f, 0x78, 0x6c, 0x61, 0x74,
- 0x65, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e,
- 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73,
- 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0e, 0x73, 0x6f, 0x63,
- 0x6b, 0x58, 0x6c, 0x61, 0x74, 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x73,
- 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x18, 0x20, 0x20, 0x01,
- 0x28, 0x04, 0x52, 0x0c, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65,
- 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x21, 0x20,
- 0x01, 0x28, 0x04, 0x52, 0x08, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x64, 0x12, 0x1e, 0x0a,
- 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0xa0, 0x8d, 0x06, 0x20, 0x01, 0x28, 0x09,
- 0x42, 0x02, 0x18, 0x01, 0x52, 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x4a, 0x04, 0x08,
- 0x07, 0x10, 0x08, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08, 0x11, 0x10, 0x12, 0x4a,
- 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xc4, 0x01, 0x0a, 0x06, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x34,
- 0x12, 0x1d, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e,
- 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x43, 0x50, 0x48, 0x00, 0x52, 0x03, 0x54, 0x43, 0x50, 0x12,
- 0x1d, 0x0a, 0x03, 0x55, 0x44, 0x50, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x66,
- 0x6c, 0x6f, 0x77, 0x2e, 0x55, 0x44, 0x50, 0x48, 0x00, 0x52, 0x03, 0x55, 0x44, 0x50, 0x12, 0x26,
- 0x0a, 0x06, 0x49, 0x43, 0x4d, 0x50, 0x76, 0x34, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c,
- 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x43, 0x4d, 0x50, 0x76, 0x34, 0x48, 0x00, 0x52, 0x06,
- 0x49, 0x43, 0x4d, 0x50, 0x76, 0x34, 0x12, 0x26, 0x0a, 0x06, 0x49, 0x43, 0x4d, 0x50, 0x76, 0x36,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x43,
- 0x4d, 0x50, 0x76, 0x36, 0x48, 0x00, 0x52, 0x06, 0x49, 0x43, 0x4d, 0x50, 0x76, 0x36, 0x12, 0x20,
- 0x0a, 0x04, 0x53, 0x43, 0x54, 0x50, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x66,
- 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x43, 0x54, 0x50, 0x48, 0x00, 0x52, 0x04, 0x53, 0x43, 0x54, 0x50,
- 0x42, 0x0a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0xbd, 0x01, 0x0a,
- 0x06, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x37, 0x12, 0x24, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x4c, 0x37, 0x46,
- 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a,
- 0x0a, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x04, 0x52, 0x09, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4e, 0x73, 0x12, 0x1d, 0x0a, 0x03,
- 0x64, 0x6e, 0x73, 0x18, 0x64, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x66, 0x6c, 0x6f, 0x77,
- 0x2e, 0x44, 0x4e, 0x53, 0x48, 0x00, 0x52, 0x03, 0x64, 0x6e, 0x73, 0x12, 0x20, 0x0a, 0x04, 0x68,
- 0x74, 0x74, 0x70, 0x18, 0x65, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x66, 0x6c, 0x6f, 0x77,
- 0x2e, 0x48, 0x54, 0x54, 0x50, 0x48, 0x00, 0x52, 0x04, 0x68, 0x74, 0x74, 0x70, 0x12, 0x23, 0x0a,
- 0x05, 0x6b, 0x61, 0x66, 0x6b, 0x61, 0x18, 0x66, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x66,
- 0x6c, 0x6f, 0x77, 0x2e, 0x4b, 0x61, 0x66, 0x6b, 0x61, 0x48, 0x00, 0x52, 0x05, 0x6b, 0x61, 0x66,
- 0x6b, 0x61, 0x42, 0x08, 0x0a, 0x06, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x22, 0x39, 0x0a, 0x0c,
- 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x29, 0x0a, 0x06,
- 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x66,
- 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x52,
- 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x22, 0x28, 0x0a, 0x0b, 0x54, 0x72, 0x61, 0x63, 0x65,
- 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f,
- 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49,
- 0x64, 0x22, 0xb5, 0x01, 0x0a, 0x08, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x0e,
- 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x49, 0x44, 0x12, 0x1a,
- 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d,
- 0x52, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61,
- 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e,
- 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65,
- 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73,
- 0x12, 0x19, 0x0a, 0x08, 0x70, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x07, 0x70, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x09, 0x77,
- 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e,
- 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x09,
- 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x22, 0x32, 0x0a, 0x08, 0x57, 0x6f, 0x72,
- 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x69, 0x6e,
- 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x77, 0x0a,
- 0x03, 0x54, 0x43, 0x50, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70,
- 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52,
- 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74,
- 0x12, 0x24, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x43, 0x50, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x52,
- 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x22, 0x8b, 0x01, 0x0a, 0x02, 0x49, 0x50, 0x12, 0x16, 0x0a,
+ 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x3e,
+ 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x66, 0x6c,
+ 0x6f, 0x77, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x12, 0x64, 0x65, 0x73, 0x74,
+ 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43,
+ 0x0a, 0x11, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x6f, 0x77,
+ 0x2e, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x52, 0x10, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x11, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x6d, 0x61,
+ 0x74, 0x63, 0x68, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f,
+ 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x54, 0x79, 0x70, 0x65, 0x12,
+ 0x53, 0x0a, 0x17, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0e,
+ 0x32, 0x1b, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x4f, 0x62, 0x73,
+ 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x15, 0x74,
+ 0x72, 0x61, 0x63, 0x65, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50,
+ 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3a, 0x0a, 0x10, 0x64, 0x72, 0x6f, 0x70, 0x5f, 0x72, 0x65, 0x61,
+ 0x73, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10,
+ 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e,
+ 0x52, 0x0e, 0x64, 0x72, 0x6f, 0x70, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x44, 0x65, 0x73, 0x63,
+ 0x12, 0x35, 0x0a, 0x08, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x79, 0x18, 0x1a, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07,
+ 0x69, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x47, 0x0a, 0x13, 0x64, 0x65, 0x62, 0x75, 0x67,
+ 0x5f, 0x63, 0x61, 0x70, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x1b,
+ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x65, 0x62, 0x75,
+ 0x67, 0x43, 0x61, 0x70, 0x74, 0x75, 0x72, 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x11, 0x64,
+ 0x65, 0x62, 0x75, 0x67, 0x43, 0x61, 0x70, 0x74, 0x75, 0x72, 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74,
+ 0x12, 0x34, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x18, 0x1c, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f,
+ 0x72, 0x6b, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x52, 0x09, 0x69, 0x6e, 0x74,
+ 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f,
+ 0x70, 0x6f, 0x72, 0x74, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x78,
+ 0x79, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x37, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x63,
+ 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x66,
+ 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74,
+ 0x52, 0x0c, 0x74, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x46,
+ 0x0a, 0x10, 0x73, 0x6f, 0x63, 0x6b, 0x5f, 0x78, 0x6c, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x6f, 0x69,
+ 0x6e, 0x74, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e,
+ 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0e, 0x73, 0x6f, 0x63, 0x6b, 0x58, 0x6c, 0x61, 0x74,
+ 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74,
+ 0x5f, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x18, 0x20, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x73,
+ 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x63,
+ 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08,
+ 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x07, 0x53, 0x75, 0x6d, 0x6d,
+ 0x61, 0x72, 0x79, 0x18, 0xa0, 0x8d, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52,
+ 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x36, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65,
+ 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf0, 0x93, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73,
+ 0x12, 0x3a, 0x0a, 0x11, 0x65, 0x67, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x61, 0x6c, 0x6c, 0x6f, 0x77,
+ 0x65, 0x64, 0x5f, 0x62, 0x79, 0x18, 0x89, 0xa4, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e,
+ 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, 0x65, 0x67, 0x72,
+ 0x65, 0x73, 0x73, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x42, 0x79, 0x12, 0x3c, 0x0a, 0x12,
+ 0x69, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f,
+ 0x62, 0x79, 0x18, 0x8a, 0xa4, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x66, 0x6c, 0x6f,
+ 0x77, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x10, 0x69, 0x6e, 0x67, 0x72, 0x65, 0x73,
+ 0x73, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x42, 0x79, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08,
+ 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08, 0x11, 0x10, 0x12, 0x4a, 0x04, 0x08, 0x12,
+ 0x10, 0x13, 0x22, 0xc4, 0x01, 0x0a, 0x06, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x34, 0x12, 0x1d, 0x0a,
+ 0x03, 0x54, 0x43, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x66, 0x6c, 0x6f,
+ 0x77, 0x2e, 0x54, 0x43, 0x50, 0x48, 0x00, 0x52, 0x03, 0x54, 0x43, 0x50, 0x12, 0x1d, 0x0a, 0x03,
+ 0x55, 0x44, 0x50, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x66, 0x6c, 0x6f, 0x77,
+ 0x2e, 0x55, 0x44, 0x50, 0x48, 0x00, 0x52, 0x03, 0x55, 0x44, 0x50, 0x12, 0x26, 0x0a, 0x06, 0x49,
+ 0x43, 0x4d, 0x50, 0x76, 0x34, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x66, 0x6c,
+ 0x6f, 0x77, 0x2e, 0x49, 0x43, 0x4d, 0x50, 0x76, 0x34, 0x48, 0x00, 0x52, 0x06, 0x49, 0x43, 0x4d,
+ 0x50, 0x76, 0x34, 0x12, 0x26, 0x0a, 0x06, 0x49, 0x43, 0x4d, 0x50, 0x76, 0x36, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x43, 0x4d, 0x50, 0x76,
+ 0x36, 0x48, 0x00, 0x52, 0x06, 0x49, 0x43, 0x4d, 0x50, 0x76, 0x36, 0x12, 0x20, 0x0a, 0x04, 0x53,
+ 0x43, 0x54, 0x50, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x66, 0x6c, 0x6f, 0x77,
+ 0x2e, 0x53, 0x43, 0x54, 0x50, 0x48, 0x00, 0x52, 0x04, 0x53, 0x43, 0x54, 0x50, 0x42, 0x0a, 0x0a,
+ 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0xbd, 0x01, 0x0a, 0x06, 0x4c, 0x61,
+ 0x79, 0x65, 0x72, 0x37, 0x12, 0x24, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x4c, 0x37, 0x46, 0x6c, 0x6f, 0x77,
+ 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x61,
+ 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09,
+ 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4e, 0x73, 0x12, 0x1d, 0x0a, 0x03, 0x64, 0x6e, 0x73,
+ 0x18, 0x64, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x4e,
+ 0x53, 0x48, 0x00, 0x52, 0x03, 0x64, 0x6e, 0x73, 0x12, 0x20, 0x0a, 0x04, 0x68, 0x74, 0x74, 0x70,
+ 0x18, 0x65, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x48, 0x54,
+ 0x54, 0x50, 0x48, 0x00, 0x52, 0x04, 0x68, 0x74, 0x74, 0x70, 0x12, 0x23, 0x0a, 0x05, 0x6b, 0x61,
+ 0x66, 0x6b, 0x61, 0x18, 0x66, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x66, 0x6c, 0x6f, 0x77,
+ 0x2e, 0x4b, 0x61, 0x66, 0x6b, 0x61, 0x48, 0x00, 0x52, 0x05, 0x6b, 0x61, 0x66, 0x6b, 0x61, 0x42,
+ 0x08, 0x0a, 0x06, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x22, 0x39, 0x0a, 0x0c, 0x54, 0x72, 0x61,
+ 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x29, 0x0a, 0x06, 0x70, 0x61, 0x72,
+ 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x66, 0x6c, 0x6f, 0x77,
+ 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x70, 0x61,
+ 0x72, 0x65, 0x6e, 0x74, 0x22, 0x28, 0x0a, 0x0b, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72,
+ 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x22, 0xb5,
+ 0x01, 0x0a, 0x08, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x49,
+ 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x49, 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x69,
+ 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x69,
+ 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18,
+ 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x19, 0x0a,
+ 0x08, 0x70, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x07, 0x70, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b,
+ 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x66, 0x6c,
+ 0x6f, 0x77, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x09, 0x77, 0x6f, 0x72,
+ 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x22, 0x32, 0x0a, 0x08, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f,
+ 0x61, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x77, 0x0a, 0x03, 0x54, 0x43,
+ 0x50, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f,
+ 0x72, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x64, 0x65,
+ 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x24, 0x0a,
+ 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x66,
+ 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x43, 0x50, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x52, 0x05, 0x66, 0x6c,
+ 0x61, 0x67, 0x73, 0x22, 0x8b, 0x01, 0x0a, 0x02, 0x49, 0x50, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x09, 0x69, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49,
+ 0x50, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x69, 0x70, 0x56, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x65,
+ 0x64, 0x22, 0x44, 0x0a, 0x08, 0x45, 0x74, 0x68, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x12, 0x16, 0x0a,
0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73,
0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74,
- 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x09, 0x69, 0x70, 0x56, 0x65, 0x72,
- 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x66, 0x6c, 0x6f,
- 0x77, 0x2e, 0x49, 0x50, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x69, 0x70, 0x56,
- 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70,
- 0x74, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x65, 0x6e, 0x63, 0x72, 0x79,
- 0x70, 0x74, 0x65, 0x64, 0x22, 0x44, 0x0a, 0x08, 0x45, 0x74, 0x68, 0x65, 0x72, 0x6e, 0x65, 0x74,
- 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74,
- 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64,
- 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xaa, 0x01, 0x0a, 0x08, 0x54,
- 0x43, 0x50, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x46, 0x49, 0x4e, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x46, 0x49, 0x4e, 0x12, 0x10, 0x0a, 0x03, 0x53, 0x59, 0x4e,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x53, 0x59, 0x4e, 0x12, 0x10, 0x0a, 0x03, 0x52,
- 0x53, 0x54, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x52, 0x53, 0x54, 0x12, 0x10, 0x0a,
- 0x03, 0x50, 0x53, 0x48, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x50, 0x53, 0x48, 0x12,
- 0x10, 0x0a, 0x03, 0x41, 0x43, 0x4b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x41, 0x43,
- 0x4b, 0x12, 0x10, 0x0a, 0x03, 0x55, 0x52, 0x47, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03,
- 0x55, 0x52, 0x47, 0x12, 0x10, 0x0a, 0x03, 0x45, 0x43, 0x45, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08,
- 0x52, 0x03, 0x45, 0x43, 0x45, 0x12, 0x10, 0x0a, 0x03, 0x43, 0x57, 0x52, 0x18, 0x08, 0x20, 0x01,
- 0x28, 0x08, 0x52, 0x03, 0x43, 0x57, 0x52, 0x12, 0x0e, 0x0a, 0x02, 0x4e, 0x53, 0x18, 0x09, 0x20,
- 0x01, 0x28, 0x08, 0x52, 0x02, 0x4e, 0x53, 0x22, 0x51, 0x0a, 0x03, 0x55, 0x44, 0x50, 0x12, 0x1f,
- 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x12,
- 0x29, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70,
- 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69,
- 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x52, 0x0a, 0x04, 0x53, 0x43,
- 0x54, 0x50, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x6f, 0x72,
- 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50,
+ 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xaa, 0x01, 0x0a, 0x08, 0x54, 0x43, 0x50, 0x46,
+ 0x6c, 0x61, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x46, 0x49, 0x4e, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x03, 0x46, 0x49, 0x4e, 0x12, 0x10, 0x0a, 0x03, 0x53, 0x59, 0x4e, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x03, 0x53, 0x59, 0x4e, 0x12, 0x10, 0x0a, 0x03, 0x52, 0x53, 0x54, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x52, 0x53, 0x54, 0x12, 0x10, 0x0a, 0x03, 0x50, 0x53,
+ 0x48, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x50, 0x53, 0x48, 0x12, 0x10, 0x0a, 0x03,
+ 0x41, 0x43, 0x4b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x41, 0x43, 0x4b, 0x12, 0x10,
+ 0x0a, 0x03, 0x55, 0x52, 0x47, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x55, 0x52, 0x47,
+ 0x12, 0x10, 0x0a, 0x03, 0x45, 0x43, 0x45, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x45,
+ 0x43, 0x45, 0x12, 0x10, 0x0a, 0x03, 0x43, 0x57, 0x52, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x03, 0x43, 0x57, 0x52, 0x12, 0x0e, 0x0a, 0x02, 0x4e, 0x53, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x02, 0x4e, 0x53, 0x22, 0x51, 0x0a, 0x03, 0x55, 0x44, 0x50, 0x12, 0x1f, 0x0a, 0x0b, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d,
+ 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x29, 0x0a, 0x10,
+ 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x52, 0x0a, 0x04, 0x53, 0x43, 0x54, 0x50, 0x12,
+ 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x72, 0x74,
+ 0x12, 0x29, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74,
+ 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x30, 0x0a, 0x06, 0x49,
+ 0x43, 0x4d, 0x50, 0x76, 0x34, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0d, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x22, 0x30, 0x0a,
+ 0x06, 0x49, 0x43, 0x4d, 0x50, 0x76, 0x36, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63,
+ 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x22,
+ 0x6e, 0x0a, 0x06, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a,
+ 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c,
+ 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x62,
+ 0x65, 0x6c, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x22,
+ 0x66, 0x0a, 0x0f, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x46, 0x69, 0x6c, 0x74,
+ 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
+ 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f,
+ 0x73, 0x75, 0x62, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x53, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x08,
+ 0x73, 0x75, 0x62, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07,
+ 0x73, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x22, 0x40, 0x0a, 0x0f, 0x43, 0x69, 0x6c, 0x69, 0x75,
+ 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79,
+ 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19,
+ 0x0a, 0x08, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05,
+ 0x52, 0x07, 0x73, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x22, 0xcf, 0x09, 0x0a, 0x0a, 0x46, 0x6c,
+ 0x6f, 0x77, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64,
+ 0x18, 0x1d, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
+ 0x08, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x70, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x5f, 0x70, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x5f, 0x66, 0x71, 0x64, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x71, 0x64, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52,
+ 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x25, 0x0a, 0x0e,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x10,
+ 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x12, 0x37, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x77, 0x6f,
+ 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x1a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x66,
+ 0x6c, 0x6f, 0x77, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x0e, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x25, 0x0a, 0x0e,
+ 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x70, 0x18, 0x03,
+ 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x49, 0x70, 0x12, 0x27, 0x0a, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x64, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x64, 0x65,
+ 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x64, 0x12, 0x29, 0x0a, 0x10,
+ 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x71, 0x64, 0x6e,
+ 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x46, 0x71, 0x64, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69,
+ 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x0b, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c,
+ 0x61, 0x62, 0x65, 0x6c, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x11, 0x20, 0x03, 0x28,
+ 0x09, 0x52, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x41, 0x0a, 0x14, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x1b, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c,
+ 0x6f, 0x61, 0x64, 0x52, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x43, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x66,
+ 0x66, 0x69, 0x63, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x1e, 0x20,
+ 0x03, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x72, 0x61, 0x66, 0x66,
+ 0x69, 0x63, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x72, 0x61,
+ 0x66, 0x66, 0x69, 0x63, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a,
+ 0x07, 0x76, 0x65, 0x72, 0x64, 0x69, 0x63, 0x74, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x0d,
+ 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x56, 0x65, 0x72, 0x64, 0x69, 0x63, 0x74, 0x52, 0x07, 0x76,
+ 0x65, 0x72, 0x64, 0x69, 0x63, 0x74, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x66, 0x6c, 0x6f,
+ 0x77, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65,
+ 0x72, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x10,
+ 0x68, 0x74, 0x74, 0x70, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65,
+ 0x18, 0x09, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x68, 0x74, 0x74, 0x70, 0x53, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+ 0x6f, 0x6c, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+ 0x6f, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x6f, 0x72,
+ 0x74, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50,
0x6f, 0x72, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x64,
- 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x30,
- 0x0a, 0x06, 0x49, 0x43, 0x4d, 0x50, 0x76, 0x34, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04,
- 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65,
- 0x22, 0x30, 0x0a, 0x06, 0x49, 0x43, 0x4d, 0x50, 0x76, 0x36, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79,
- 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12,
- 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f,
- 0x64, 0x65, 0x22, 0x66, 0x0a, 0x0f, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x46,
- 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x05, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x74,
- 0x63, 0x68, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x08, 0x52, 0x0c, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x53, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x12,
- 0x19, 0x0a, 0x08, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
- 0x05, 0x52, 0x07, 0x73, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x22, 0x40, 0x0a, 0x0f, 0x43, 0x69,
- 0x6c, 0x69, 0x75, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a,
- 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x74, 0x79, 0x70,
- 0x65, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x05, 0x52, 0x07, 0x73, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x22, 0xb4, 0x09, 0x0a,
- 0x0a, 0x46, 0x6c, 0x6f, 0x77, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x75,
- 0x75, 0x69, 0x64, 0x18, 0x1d, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x12,
- 0x1b, 0x0a, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x01, 0x20, 0x03,
- 0x28, 0x09, 0x52, 0x08, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x70, 0x12, 0x1d, 0x0a, 0x0a,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09,
- 0x52, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x73,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x71, 0x64, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09,
- 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x71, 0x64, 0x6e, 0x12, 0x21, 0x0a, 0x0c,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x0a, 0x20, 0x03,
- 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12,
- 0x25, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x18, 0x10, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53,
- 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x37, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
- 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x1a, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x52,
- 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x12,
- 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69,
- 0x70, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x70, 0x12, 0x27, 0x0a, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x64, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52,
- 0x0e, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x64, 0x12,
- 0x29, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66,
- 0x71, 0x64, 0x6e, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69,
- 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x71, 0x64, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x64, 0x65,
- 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18,
- 0x0b, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69,
- 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x11,
- 0x20, 0x03, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x41, 0x0a, 0x14, 0x64, 0x65, 0x73, 0x74,
- 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64,
- 0x18, 0x1b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x57, 0x6f,
- 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x43, 0x0a, 0x11, 0x74,
- 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
- 0x18, 0x1e, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x72,
- 0x61, 0x66, 0x66, 0x69, 0x63, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10,
- 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
- 0x12, 0x27, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x64, 0x69, 0x63, 0x74, 0x18, 0x05, 0x20, 0x03, 0x28,
- 0x0e, 0x32, 0x0d, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x56, 0x65, 0x72, 0x64, 0x69, 0x63, 0x74,
- 0x52, 0x07, 0x76, 0x65, 0x72, 0x64, 0x69, 0x63, 0x74, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x76, 0x65,
- 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e,
- 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x46, 0x69,
- 0x6c, 0x74, 0x65, 0x72, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12,
- 0x28, 0x0a, 0x10, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63,
- 0x6f, 0x64, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x68, 0x74, 0x74, 0x70, 0x53,
- 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f,
- 0x70, 0x6f, 0x72, 0x74, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x09,
- 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72,
- 0x74, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x70, 0x6c, 0x79, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x08,
- 0x52, 0x05, 0x72, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x6e, 0x73, 0x5f, 0x71,
- 0x75, 0x65, 0x72, 0x79, 0x18, 0x12, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x64, 0x6e, 0x73, 0x51,
- 0x75, 0x65, 0x72, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69,
- 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x0e, 0x73,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x31, 0x0a,
- 0x14, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x65,
- 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x13, 0x64, 0x65, 0x73,
- 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79,
- 0x12, 0x1f, 0x0a, 0x0b, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18,
- 0x15, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x74, 0x68, 0x6f,
- 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x16,
- 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x68, 0x74, 0x74, 0x70, 0x50, 0x61, 0x74, 0x68, 0x12, 0x2b,
- 0x0a, 0x09, 0x74, 0x63, 0x70, 0x5f, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x17, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x43, 0x50, 0x46, 0x6c, 0x61, 0x67,
- 0x73, 0x52, 0x08, 0x74, 0x63, 0x70, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6e,
- 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x18, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08,
- 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x0a, 0x69, 0x70, 0x5f, 0x76,
- 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x19, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x66,
- 0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x50, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x69,
- 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63,
- 0x65, 0x5f, 0x69, 0x64, 0x18, 0x1c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63,
- 0x65, 0x49, 0x64, 0x22, 0xce, 0x01, 0x0a, 0x03, 0x44, 0x4e, 0x53, 0x12, 0x14, 0x0a, 0x05, 0x71,
- 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72,
- 0x79, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x03,
- 0x69, 0x70, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d,
- 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18,
- 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x63, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x2d, 0x0a,
- 0x12, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x6f, 0x62, 0x73, 0x65, 0x72,
- 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05,
- 0x72, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x72, 0x63, 0x6f,
- 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x71, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03,
- 0x28, 0x09, 0x52, 0x06, 0x71, 0x74, 0x79, 0x70, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x72,
- 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x72, 0x74,
- 0x79, 0x70, 0x65, 0x73, 0x22, 0x34, 0x0a, 0x0a, 0x48, 0x54, 0x54, 0x50, 0x48, 0x65, 0x61, 0x64,
- 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x8c, 0x01, 0x0a, 0x04, 0x48,
- 0x54, 0x54, 0x50, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f,
- 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12,
- 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72,
- 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x04, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x2a, 0x0a,
- 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10,
- 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72,
- 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x22, 0x9d, 0x01, 0x0a, 0x05, 0x4b, 0x61,
- 0x66, 0x6b, 0x61, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64,
- 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f,
- 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
- 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73,
- 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x61, 0x70, 0x69, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x61, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x0e,
- 0x63, 0x6f, 0x72, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x63, 0x6f, 0x72, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x22, 0x3b, 0x0a, 0x07, 0x53, 0x65, 0x72,
- 0x76, 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65,
- 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d,
- 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x91, 0x01, 0x0a, 0x09, 0x4c, 0x6f, 0x73, 0x74, 0x45,
- 0x76, 0x65, 0x6e, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x4c, 0x6f, 0x73, 0x74,
- 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x06, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x75, 0x6d, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74,
- 0x73, 0x5f, 0x6c, 0x6f, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6e, 0x75,
- 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x4c, 0x6f, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x03, 0x63,
- 0x70, 0x75, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x33, 0x32,
- 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03, 0x63, 0x70, 0x75, 0x22, 0xf6, 0x04, 0x0a, 0x0a, 0x41,
- 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x04, 0x74, 0x79, 0x70,
- 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x41,
- 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74,
- 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x18, 0x64,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x41, 0x67, 0x65, 0x6e,
- 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x48, 0x00, 0x52,
- 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x67, 0x65, 0x6e,
- 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x65, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e,
- 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x53, 0x74,
- 0x61, 0x72, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x75, 0x70,
- 0x64, 0x61, 0x74, 0x65, 0x18, 0x66, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x66, 0x6c, 0x6f,
- 0x77, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f,
- 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0c, 0x70, 0x6f,
- 0x6c, 0x69, 0x63, 0x79, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x52, 0x0a, 0x13, 0x65, 0x6e,
- 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
- 0x65, 0x18, 0x67, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45,
- 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x65, 0x6e, 0x4e, 0x6f, 0x74, 0x69,
- 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x12, 0x65, 0x6e, 0x64, 0x70,
- 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x12, 0x4b,
- 0x0a, 0x0f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74,
- 0x65, 0x18, 0x68, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45,
- 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74,
- 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0e, 0x65, 0x6e, 0x64,
- 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x42, 0x0a, 0x0e, 0x69,
- 0x70, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x69, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x50, 0x43, 0x61, 0x63,
- 0x68, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00,
- 0x52, 0x0d, 0x69, 0x70, 0x63, 0x61, 0x63, 0x68, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12,
- 0x48, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x75, 0x70, 0x73, 0x65, 0x72,
- 0x74, 0x18, 0x6a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53,
- 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x4e, 0x6f, 0x74, 0x69,
- 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0d, 0x73, 0x65, 0x72, 0x76,
- 0x69, 0x63, 0x65, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x12, 0x48, 0x0a, 0x0e, 0x73, 0x65, 0x72,
- 0x76, 0x69, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x6b, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
- 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x6c,
- 0x65, 0x74, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x22, 0x4b, 0x0a, 0x11, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e,
- 0x74, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x22, 0x0a, 0x0c,
- 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x0c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x22, 0x42, 0x0a, 0x10, 0x54, 0x69, 0x6d, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04,
- 0x74, 0x69, 0x6d, 0x65, 0x22, 0x6d, 0x0a, 0x18, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x55, 0x70,
- 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09,
- 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x76, 0x69,
- 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x72, 0x65, 0x76, 0x69,
- 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x75, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75,
- 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x75, 0x6c, 0x65, 0x43, 0x6f,
- 0x75, 0x6e, 0x74, 0x22, 0x59, 0x0a, 0x19, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52,
+ 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x64,
+ 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x14,
+ 0x0a, 0x05, 0x72, 0x65, 0x70, 0x6c, 0x79, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x08, 0x52, 0x05, 0x72,
+ 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x6e, 0x73, 0x5f, 0x71, 0x75, 0x65, 0x72,
+ 0x79, 0x18, 0x12, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x64, 0x6e, 0x73, 0x51, 0x75, 0x65, 0x72,
+ 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x65, 0x6e,
+ 0x74, 0x69, 0x74, 0x79, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x31, 0x0a, 0x14, 0x64, 0x65,
+ 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69,
+ 0x74, 0x79, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1f, 0x0a,
+ 0x0b, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x15, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1b,
+ 0x0a, 0x09, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x16, 0x20, 0x03, 0x28,
+ 0x09, 0x52, 0x08, 0x68, 0x74, 0x74, 0x70, 0x50, 0x61, 0x74, 0x68, 0x12, 0x19, 0x0a, 0x08, 0x68,
+ 0x74, 0x74, 0x70, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x1f, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x68,
+ 0x74, 0x74, 0x70, 0x55, 0x72, 0x6c, 0x12, 0x2b, 0x0a, 0x09, 0x74, 0x63, 0x70, 0x5f, 0x66, 0x6c,
+ 0x61, 0x67, 0x73, 0x18, 0x17, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77,
+ 0x2e, 0x54, 0x43, 0x50, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x52, 0x08, 0x74, 0x63, 0x70, 0x46, 0x6c,
+ 0x61, 0x67, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x18, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65,
+ 0x12, 0x2e, 0x0a, 0x0a, 0x69, 0x70, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x19,
+ 0x20, 0x03, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x50, 0x56, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x69, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
+ 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x1c, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x22, 0xce, 0x01, 0x0a, 0x03,
+ 0x44, 0x4e, 0x53, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x70, 0x73,
+ 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x03, 0x69, 0x70, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x74,
+ 0x74, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x16, 0x0a,
+ 0x06, 0x63, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x63,
+ 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x12, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x11, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x06, 0x20,
+ 0x01, 0x28, 0x0d, 0x52, 0x05, 0x72, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x71, 0x74,
+ 0x79, 0x70, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x71, 0x74, 0x79, 0x70,
+ 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x72, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x08, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x72, 0x74, 0x79, 0x70, 0x65, 0x73, 0x22, 0x34, 0x0a, 0x0a,
+ 0x48, 0x54, 0x54, 0x50, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
+ 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x22, 0x8c, 0x01, 0x0a, 0x04, 0x48, 0x54, 0x54, 0x50, 0x12, 0x12, 0x0a, 0x04, 0x63,
+ 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12,
+ 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x2a, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73,
+ 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x48, 0x54,
+ 0x54, 0x50, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72,
+ 0x73, 0x22, 0x9d, 0x01, 0x0a, 0x05, 0x4b, 0x61, 0x66, 0x6b, 0x61, 0x12, 0x1d, 0x0a, 0x0a, 0x65,
+ 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52,
+ 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70,
+ 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52,
+ 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x61,
+ 0x70, 0x69, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x61, 0x70,
+ 0x69, 0x4b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6f, 0x72, 0x72, 0x65, 0x6c, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x63, 0x6f,
+ 0x72, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74,
+ 0x6f, 0x70, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69,
+ 0x63, 0x22, 0x3b, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x91,
+ 0x01, 0x0a, 0x09, 0x4c, 0x6f, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x2d, 0x0a, 0x06,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x66,
+ 0x6c, 0x6f, 0x77, 0x2e, 0x4c, 0x6f, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x6e,
+ 0x75, 0x6d, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x5f, 0x6c, 0x6f, 0x73, 0x74, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6e, 0x75, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x4c,
+ 0x6f, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x03, 0x63, 0x70, 0x75, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03, 0x63,
+ 0x70, 0x75, 0x22, 0xf6, 0x04, 0x0a, 0x0a, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e,
+ 0x74, 0x12, 0x28, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32,
+ 0x14, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e,
+ 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x75,
+ 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x18, 0x64, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x66,
+ 0x6c, 0x6f, 0x77, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x55, 0x6e,
+ 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x48, 0x00, 0x52, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e,
+ 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18,
+ 0x65, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x69, 0x6d,
+ 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52,
+ 0x0a, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70,
+ 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x66, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79,
+ 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x55, 0x70, 0x64, 0x61,
+ 0x74, 0x65, 0x12, 0x52, 0x0a, 0x13, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x72,
+ 0x65, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x18, 0x67, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x1f, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52,
0x65, 0x67, 0x65, 0x6e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69, 0x64,
- 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09,
- 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f,
- 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x93,
- 0x01, 0x0a, 0x1a, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74,
- 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a,
- 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a,
- 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c,
- 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x70,
- 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70,
- 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x22, 0x99, 0x02, 0x0a, 0x13, 0x49, 0x50, 0x43, 0x61, 0x63, 0x68, 0x65,
- 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04,
- 0x63, 0x69, 0x64, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x69, 0x64, 0x72,
- 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x0d, 0x52, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x3f, 0x0a, 0x0c,
- 0x6f, 0x6c, 0x64, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65,
- 0x52, 0x0b, 0x6f, 0x6c, 0x64, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x17, 0x0a,
- 0x07, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x69, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06,
- 0x68, 0x6f, 0x73, 0x74, 0x49, 0x70, 0x12, 0x1e, 0x0a, 0x0b, 0x6f, 0x6c, 0x64, 0x5f, 0x68, 0x6f,
- 0x73, 0x74, 0x5f, 0x69, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6f, 0x6c, 0x64,
- 0x48, 0x6f, 0x73, 0x74, 0x49, 0x70, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70,
- 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x65, 0x6e, 0x63,
- 0x72, 0x79, 0x70, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65,
- 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d,
- 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65,
- 0x22, 0x43, 0x0a, 0x1d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x73, 0x65, 0x72,
- 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64,
- 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69,
- 0x70, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52,
- 0x04, 0x70, 0x6f, 0x72, 0x74, 0x22, 0x9a, 0x03, 0x0a, 0x19, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52,
- 0x02, 0x69, 0x64, 0x12, 0x4e, 0x0a, 0x10, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x5f,
- 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e,
- 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x73, 0x65,
- 0x72, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64,
- 0x64, 0x72, 0x52, 0x0f, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x41, 0x64, 0x64, 0x72,
- 0x65, 0x73, 0x73, 0x12, 0x50, 0x0a, 0x11, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x61,
- 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23,
- 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x73,
- 0x65, 0x72, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41,
- 0x64, 0x64, 0x72, 0x52, 0x10, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x41, 0x64, 0x64, 0x72,
- 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x0e, 0x74, 0x72, 0x61,
- 0x66, 0x66, 0x69, 0x63, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28,
- 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x50, 0x6f,
- 0x6c, 0x69, 0x63, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65,
- 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d,
- 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x65, 0x78, 0x74, 0x5f, 0x74, 0x72,
- 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x08, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x10, 0x65, 0x78, 0x74, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x50, 0x6f,
- 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2c, 0x0a, 0x12, 0x69, 0x6e, 0x74, 0x5f, 0x74, 0x72, 0x61, 0x66,
- 0x66, 0x69, 0x63, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x10, 0x69, 0x6e, 0x74, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x50, 0x6f, 0x6c, 0x69,
- 0x63, 0x79, 0x22, 0x2b, 0x0a, 0x19, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x6c,
- 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
- 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x22,
- 0x3c, 0x0a, 0x10, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66,
- 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0d, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xef, 0x02,
- 0x0a, 0x0a, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x04,
- 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x66, 0x6c, 0x6f,
- 0x77, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65,
- 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x6e,
- 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x30,
- 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55,
- 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68,
- 0x12, 0x30, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x31, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x61, 0x72,
- 0x67, 0x31, 0x12, 0x30, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x32, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04,
- 0x61, 0x72, 0x67, 0x32, 0x12, 0x30, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x33, 0x18, 0x06, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65,
- 0x52, 0x04, 0x61, 0x72, 0x67, 0x33, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
- 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
- 0x12, 0x2d, 0x0a, 0x03, 0x63, 0x70, 0x75, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03, 0x63, 0x70, 0x75, 0x2a,
- 0x39, 0x0a, 0x08, 0x46, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x0c, 0x55,
- 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x00, 0x12, 0x09, 0x0a,
- 0x05, 0x4c, 0x33, 0x5f, 0x4c, 0x34, 0x10, 0x01, 0x12, 0x06, 0x0a, 0x02, 0x4c, 0x37, 0x10, 0x02,
- 0x12, 0x08, 0x0a, 0x04, 0x53, 0x4f, 0x43, 0x4b, 0x10, 0x03, 0x2a, 0x39, 0x0a, 0x08, 0x41, 0x75,
- 0x74, 0x68, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x53, 0x41, 0x42, 0x4c,
- 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x49, 0x52, 0x45, 0x10, 0x01, 0x12,
- 0x14, 0x0a, 0x10, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x41, 0x4c, 0x57, 0x41, 0x59, 0x53, 0x5f, 0x46,
- 0x41, 0x49, 0x4c, 0x10, 0x02, 0x2a, 0xea, 0x01, 0x0a, 0x15, 0x54, 0x72, 0x61, 0x63, 0x65, 0x4f,
- 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12,
- 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54,
- 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x54, 0x4f, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x10, 0x01,
- 0x12, 0x0b, 0x0a, 0x07, 0x54, 0x4f, 0x5f, 0x48, 0x4f, 0x53, 0x54, 0x10, 0x02, 0x12, 0x0c, 0x0a,
- 0x08, 0x54, 0x4f, 0x5f, 0x53, 0x54, 0x41, 0x43, 0x4b, 0x10, 0x03, 0x12, 0x0e, 0x0a, 0x0a, 0x54,
- 0x4f, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x4c, 0x41, 0x59, 0x10, 0x04, 0x12, 0x0f, 0x0a, 0x0b, 0x54,
- 0x4f, 0x5f, 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x65, 0x12, 0x11, 0x0a, 0x0d,
- 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x05, 0x12,
- 0x0e, 0x0a, 0x0a, 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x10, 0x06, 0x12,
- 0x0d, 0x0a, 0x09, 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x48, 0x4f, 0x53, 0x54, 0x10, 0x07, 0x12, 0x0e,
- 0x0a, 0x0a, 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x53, 0x54, 0x41, 0x43, 0x4b, 0x10, 0x08, 0x12, 0x10,
- 0x0a, 0x0c, 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x4c, 0x41, 0x59, 0x10, 0x09,
- 0x12, 0x10, 0x0a, 0x0c, 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b,
- 0x10, 0x0a, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x4f, 0x5f, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b,
- 0x10, 0x0b, 0x2a, 0x48, 0x0a, 0x0a, 0x4c, 0x37, 0x46, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65,
- 0x12, 0x13, 0x0a, 0x0f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x4c, 0x37, 0x5f, 0x54,
- 0x59, 0x50, 0x45, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54,
- 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x45, 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, 0x10, 0x02,
- 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x41, 0x4d, 0x50, 0x4c, 0x45, 0x10, 0x03, 0x2a, 0x30, 0x0a, 0x09,
- 0x49, 0x50, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, 0x49, 0x50, 0x5f,
- 0x4e, 0x4f, 0x54, 0x5f, 0x55, 0x53, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50,
- 0x76, 0x34, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x76, 0x36, 0x10, 0x02, 0x2a, 0x7c,
- 0x0a, 0x07, 0x56, 0x65, 0x72, 0x64, 0x69, 0x63, 0x74, 0x12, 0x13, 0x0a, 0x0f, 0x56, 0x45, 0x52,
- 0x44, 0x49, 0x43, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0d,
- 0x0a, 0x09, 0x46, 0x4f, 0x52, 0x57, 0x41, 0x52, 0x44, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a,
- 0x07, 0x44, 0x52, 0x4f, 0x50, 0x50, 0x45, 0x44, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52,
- 0x52, 0x4f, 0x52, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x55, 0x44, 0x49, 0x54, 0x10, 0x04,
- 0x12, 0x0e, 0x0a, 0x0a, 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x45, 0x44, 0x10, 0x05,
- 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x52, 0x41, 0x43, 0x45, 0x44, 0x10, 0x06, 0x12, 0x0e, 0x0a, 0x0a,
- 0x54, 0x52, 0x41, 0x4e, 0x53, 0x4c, 0x41, 0x54, 0x45, 0x44, 0x10, 0x07, 0x2a, 0xb2, 0x0f, 0x0a,
- 0x0a, 0x44, 0x72, 0x6f, 0x70, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x13, 0x44,
- 0x52, 0x4f, 0x50, 0x5f, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f,
- 0x57, 0x4e, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x12, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f,
- 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x4d, 0x41, 0x43, 0x10, 0x82, 0x01, 0x12, 0x1c, 0x0a,
- 0x17, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41,
- 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x43, 0x10, 0x83, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x49,
- 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x49, 0x50,
- 0x10, 0x84, 0x01, 0x12, 0x12, 0x0a, 0x0d, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x44, 0x45,
- 0x4e, 0x49, 0x45, 0x44, 0x10, 0x85, 0x01, 0x12, 0x1b, 0x0a, 0x16, 0x49, 0x4e, 0x56, 0x41, 0x4c,
- 0x49, 0x44, 0x5f, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x44, 0x52, 0x4f, 0x50, 0x50, 0x45,
- 0x44, 0x10, 0x86, 0x01, 0x12, 0x23, 0x0a, 0x1e, 0x43, 0x54, 0x5f, 0x54, 0x52, 0x55, 0x4e, 0x43,
- 0x41, 0x54, 0x45, 0x44, 0x5f, 0x4f, 0x52, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f,
- 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x87, 0x01, 0x12, 0x1c, 0x0a, 0x17, 0x43, 0x54, 0x5f,
- 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4e, 0x47, 0x5f, 0x54, 0x43, 0x50, 0x5f, 0x41, 0x43, 0x4b, 0x5f,
- 0x46, 0x4c, 0x41, 0x47, 0x10, 0x88, 0x01, 0x12, 0x1b, 0x0a, 0x16, 0x43, 0x54, 0x5f, 0x55, 0x4e,
- 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x4c, 0x34, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f,
- 0x4c, 0x10, 0x89, 0x01, 0x12, 0x27, 0x0a, 0x22, 0x43, 0x54, 0x5f, 0x43, 0x41, 0x4e, 0x4e, 0x4f,
- 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x5f, 0x46,
- 0x52, 0x4f, 0x4d, 0x5f, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x8a, 0x01, 0x12, 0x1c, 0x0a,
- 0x17, 0x55, 0x4e, 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x5f, 0x4c, 0x33, 0x5f,
- 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, 0x8b, 0x01, 0x12, 0x15, 0x0a, 0x10, 0x4d,
- 0x49, 0x53, 0x53, 0x45, 0x44, 0x5f, 0x54, 0x41, 0x49, 0x4c, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x10,
- 0x8c, 0x01, 0x12, 0x1c, 0x0a, 0x17, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x57, 0x52, 0x49, 0x54,
- 0x49, 0x4e, 0x47, 0x5f, 0x54, 0x4f, 0x5f, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x8d, 0x01,
- 0x12, 0x18, 0x0a, 0x13, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x4c, 0x34, 0x5f, 0x50,
- 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, 0x8e, 0x01, 0x12, 0x18, 0x0a, 0x13, 0x55, 0x4e,
- 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x56, 0x34, 0x5f, 0x43, 0x4f, 0x44,
- 0x45, 0x10, 0x8f, 0x01, 0x12, 0x18, 0x0a, 0x13, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f,
- 0x49, 0x43, 0x4d, 0x50, 0x56, 0x34, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x90, 0x01, 0x12, 0x18,
- 0x0a, 0x13, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x56, 0x36,
- 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x91, 0x01, 0x12, 0x18, 0x0a, 0x13, 0x55, 0x4e, 0x4b, 0x4e,
- 0x4f, 0x57, 0x4e, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x56, 0x36, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10,
- 0x92, 0x01, 0x12, 0x20, 0x0a, 0x1b, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x52, 0x45, 0x54, 0x52,
- 0x49, 0x45, 0x56, 0x49, 0x4e, 0x47, 0x5f, 0x54, 0x55, 0x4e, 0x4e, 0x45, 0x4c, 0x5f, 0x4b, 0x45,
- 0x59, 0x10, 0x93, 0x01, 0x12, 0x24, 0x0a, 0x1f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x52, 0x45,
- 0x54, 0x52, 0x49, 0x45, 0x56, 0x49, 0x4e, 0x47, 0x5f, 0x54, 0x55, 0x4e, 0x4e, 0x45, 0x4c, 0x5f,
- 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x10, 0x94, 0x01, 0x12, 0x1a, 0x0a, 0x15, 0x49, 0x4e,
- 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x56, 0x45, 0x5f, 0x4f, 0x50, 0x54,
- 0x49, 0x4f, 0x4e, 0x10, 0x95, 0x01, 0x12, 0x1e, 0x0a, 0x19, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57,
- 0x4e, 0x5f, 0x4c, 0x33, 0x5f, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x41, 0x44, 0x44, 0x52,
- 0x45, 0x53, 0x53, 0x10, 0x96, 0x01, 0x12, 0x1b, 0x0a, 0x16, 0x53, 0x54, 0x41, 0x4c, 0x45, 0x5f,
- 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x52, 0x4f, 0x55, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x49, 0x50,
- 0x10, 0x97, 0x01, 0x12, 0x26, 0x0a, 0x21, 0x4e, 0x4f, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x49,
- 0x4e, 0x47, 0x5f, 0x4c, 0x4f, 0x43, 0x41, 0x4c, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e,
- 0x45, 0x52, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x98, 0x01, 0x12, 0x27, 0x0a, 0x22, 0x45,
- 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x57, 0x48, 0x49, 0x4c, 0x45, 0x5f, 0x43, 0x4f, 0x52, 0x52, 0x45,
- 0x43, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x4c, 0x33, 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x53, 0x55,
- 0x4d, 0x10, 0x99, 0x01, 0x12, 0x27, 0x0a, 0x22, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x57, 0x48,
- 0x49, 0x4c, 0x45, 0x5f, 0x43, 0x4f, 0x52, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x4c,
- 0x34, 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x53, 0x55, 0x4d, 0x10, 0x9a, 0x01, 0x12, 0x1c, 0x0a,
- 0x17, 0x43, 0x54, 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x49, 0x4e, 0x53, 0x45, 0x52, 0x54, 0x49, 0x4f,
- 0x4e, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x9b, 0x01, 0x12, 0x22, 0x0a, 0x1d, 0x49,
- 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x5f, 0x45, 0x58, 0x54, 0x45,
- 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x9c, 0x01, 0x12,
- 0x23, 0x0a, 0x1e, 0x49, 0x50, 0x5f, 0x46, 0x52, 0x41, 0x47, 0x4d, 0x45, 0x4e, 0x54, 0x41, 0x54,
- 0x49, 0x4f, 0x4e, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45,
- 0x44, 0x10, 0x9d, 0x01, 0x12, 0x1e, 0x0a, 0x19, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f,
- 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e,
- 0x44, 0x10, 0x9e, 0x01, 0x12, 0x28, 0x0a, 0x23, 0x4e, 0x4f, 0x5f, 0x54, 0x55, 0x4e, 0x4e, 0x45,
- 0x4c, 0x5f, 0x4f, 0x52, 0x5f, 0x45, 0x4e, 0x43, 0x41, 0x50, 0x53, 0x55, 0x4c, 0x41, 0x54, 0x49,
- 0x4f, 0x4e, 0x5f, 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0xa0, 0x01, 0x12, 0x23,
- 0x0a, 0x1e, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x5f, 0x54, 0x4f, 0x5f, 0x49, 0x4e, 0x53, 0x45,
- 0x52, 0x54, 0x5f, 0x49, 0x4e, 0x54, 0x4f, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x4d, 0x41, 0x50,
- 0x10, 0xa1, 0x01, 0x12, 0x2b, 0x0a, 0x26, 0x52, 0x45, 0x41, 0x43, 0x48, 0x45, 0x44, 0x5f, 0x45,
- 0x44, 0x54, 0x5f, 0x52, 0x41, 0x54, 0x45, 0x5f, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x49, 0x4e, 0x47,
- 0x5f, 0x44, 0x52, 0x4f, 0x50, 0x5f, 0x48, 0x4f, 0x52, 0x49, 0x5a, 0x4f, 0x4e, 0x10, 0xa2, 0x01,
- 0x12, 0x26, 0x0a, 0x21, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x4e,
- 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x4b, 0x49, 0x4e, 0x47, 0x5f,
- 0x53, 0x54, 0x41, 0x54, 0x45, 0x10, 0xa3, 0x01, 0x12, 0x1e, 0x0a, 0x19, 0x4c, 0x4f, 0x43, 0x41,
- 0x4c, 0x5f, 0x48, 0x4f, 0x53, 0x54, 0x5f, 0x49, 0x53, 0x5f, 0x55, 0x4e, 0x52, 0x45, 0x41, 0x43,
- 0x48, 0x41, 0x42, 0x4c, 0x45, 0x10, 0xa4, 0x01, 0x12, 0x3a, 0x0a, 0x35, 0x4e, 0x4f, 0x5f, 0x43,
- 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x55, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x41, 0x56, 0x41,
- 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x54, 0x4f, 0x5f, 0x50, 0x45, 0x52, 0x46, 0x4f, 0x52,
- 0x4d, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x44, 0x45, 0x43, 0x49, 0x53, 0x49, 0x4f,
- 0x4e, 0x10, 0xa5, 0x01, 0x12, 0x1c, 0x0a, 0x17, 0x55, 0x4e, 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52,
- 0x54, 0x45, 0x44, 0x5f, 0x4c, 0x32, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10,
- 0xa6, 0x01, 0x12, 0x22, 0x0a, 0x1d, 0x4e, 0x4f, 0x5f, 0x4d, 0x41, 0x50, 0x50, 0x49, 0x4e, 0x47,
- 0x5f, 0x46, 0x4f, 0x52, 0x5f, 0x4e, 0x41, 0x54, 0x5f, 0x4d, 0x41, 0x53, 0x51, 0x55, 0x45, 0x52,
- 0x41, 0x44, 0x45, 0x10, 0xa7, 0x01, 0x12, 0x2c, 0x0a, 0x27, 0x55, 0x4e, 0x53, 0x55, 0x50, 0x50,
- 0x4f, 0x52, 0x54, 0x45, 0x44, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x46,
- 0x4f, 0x52, 0x5f, 0x4e, 0x41, 0x54, 0x5f, 0x4d, 0x41, 0x53, 0x51, 0x55, 0x45, 0x52, 0x41, 0x44,
- 0x45, 0x10, 0xa8, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x46, 0x49, 0x42, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b,
- 0x55, 0x50, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0xa9, 0x01, 0x12, 0x28, 0x0a, 0x23,
- 0x45, 0x4e, 0x43, 0x41, 0x50, 0x53, 0x55, 0x4c, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x52,
- 0x41, 0x46, 0x46, 0x49, 0x43, 0x5f, 0x49, 0x53, 0x5f, 0x50, 0x52, 0x4f, 0x48, 0x49, 0x42, 0x49,
- 0x54, 0x45, 0x44, 0x10, 0xaa, 0x01, 0x12, 0x15, 0x0a, 0x10, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49,
- 0x44, 0x5f, 0x49, 0x44, 0x45, 0x4e, 0x54, 0x49, 0x54, 0x59, 0x10, 0xab, 0x01, 0x12, 0x13, 0x0a,
- 0x0e, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x53, 0x45, 0x4e, 0x44, 0x45, 0x52, 0x10,
- 0xac, 0x01, 0x12, 0x13, 0x0a, 0x0e, 0x4e, 0x41, 0x54, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x4e, 0x45,
- 0x45, 0x44, 0x45, 0x44, 0x10, 0xad, 0x01, 0x12, 0x13, 0x0a, 0x0e, 0x49, 0x53, 0x5f, 0x41, 0x5f,
- 0x43, 0x4c, 0x55, 0x53, 0x54, 0x45, 0x52, 0x49, 0x50, 0x10, 0xae, 0x01, 0x12, 0x2e, 0x0a, 0x29,
- 0x46, 0x49, 0x52, 0x53, 0x54, 0x5f, 0x4c, 0x4f, 0x47, 0x49, 0x43, 0x41, 0x4c, 0x5f, 0x44, 0x41,
- 0x54, 0x41, 0x47, 0x52, 0x41, 0x4d, 0x5f, 0x46, 0x52, 0x41, 0x47, 0x4d, 0x45, 0x4e, 0x54, 0x5f,
- 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0xaf, 0x01, 0x12, 0x1d, 0x0a, 0x18,
- 0x46, 0x4f, 0x52, 0x42, 0x49, 0x44, 0x44, 0x45, 0x4e, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x56, 0x36,
- 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0xb0, 0x01, 0x12, 0x21, 0x0a, 0x1c, 0x44,
- 0x45, 0x4e, 0x49, 0x45, 0x44, 0x5f, 0x42, 0x59, 0x5f, 0x4c, 0x42, 0x5f, 0x53, 0x52, 0x43, 0x5f,
- 0x52, 0x41, 0x4e, 0x47, 0x45, 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x10, 0xb1, 0x01, 0x12, 0x19,
- 0x0a, 0x14, 0x53, 0x4f, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f,
- 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0xb2, 0x01, 0x12, 0x19, 0x0a, 0x14, 0x53, 0x4f, 0x43,
- 0x4b, 0x45, 0x54, 0x5f, 0x41, 0x53, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45,
- 0x44, 0x10, 0xb3, 0x01, 0x12, 0x31, 0x0a, 0x2c, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x52, 0x45,
- 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x53, 0x55,
- 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x4f, 0x52, 0x5f, 0x50, 0x52, 0x4f, 0x54,
- 0x4f, 0x43, 0x4f, 0x4c, 0x10, 0xb4, 0x01, 0x12, 0x10, 0x0a, 0x0b, 0x50, 0x4f, 0x4c, 0x49, 0x43,
- 0x59, 0x5f, 0x44, 0x45, 0x4e, 0x59, 0x10, 0xb5, 0x01, 0x12, 0x12, 0x0a, 0x0d, 0x56, 0x4c, 0x41,
- 0x4e, 0x5f, 0x46, 0x49, 0x4c, 0x54, 0x45, 0x52, 0x45, 0x44, 0x10, 0xb6, 0x01, 0x12, 0x10, 0x0a,
- 0x0b, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x56, 0x4e, 0x49, 0x10, 0xb7, 0x01, 0x12,
- 0x16, 0x0a, 0x11, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x54, 0x43, 0x5f, 0x42, 0x55,
- 0x46, 0x46, 0x45, 0x52, 0x10, 0xb8, 0x01, 0x12, 0x0b, 0x0a, 0x06, 0x4e, 0x4f, 0x5f, 0x53, 0x49,
- 0x44, 0x10, 0xb9, 0x01, 0x12, 0x17, 0x0a, 0x12, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4e, 0x47, 0x5f,
- 0x53, 0x52, 0x56, 0x36, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x10, 0xba, 0x01, 0x12, 0x0a, 0x0a,
- 0x05, 0x4e, 0x41, 0x54, 0x34, 0x36, 0x10, 0xbb, 0x01, 0x12, 0x0a, 0x0a, 0x05, 0x4e, 0x41, 0x54,
- 0x36, 0x34, 0x10, 0xbc, 0x01, 0x12, 0x12, 0x0a, 0x0d, 0x41, 0x55, 0x54, 0x48, 0x5f, 0x52, 0x45,
- 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0xbd, 0x01, 0x12, 0x14, 0x0a, 0x0f, 0x43, 0x54, 0x5f,
- 0x4e, 0x4f, 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0xbe, 0x01, 0x12,
- 0x16, 0x0a, 0x11, 0x53, 0x4e, 0x41, 0x54, 0x5f, 0x4e, 0x4f, 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x46,
- 0x4f, 0x55, 0x4e, 0x44, 0x10, 0xbf, 0x01, 0x12, 0x17, 0x0a, 0x12, 0x49, 0x4e, 0x56, 0x41, 0x4c,
- 0x49, 0x44, 0x5f, 0x43, 0x4c, 0x55, 0x53, 0x54, 0x45, 0x52, 0x5f, 0x49, 0x44, 0x10, 0xc0, 0x01,
- 0x12, 0x27, 0x0a, 0x22, 0x55, 0x4e, 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x5f,
- 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x46, 0x4f, 0x52, 0x5f, 0x44, 0x53, 0x52,
- 0x5f, 0x45, 0x4e, 0x43, 0x41, 0x50, 0x10, 0xc1, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x4e, 0x4f, 0x5f,
- 0x45, 0x47, 0x52, 0x45, 0x53, 0x53, 0x5f, 0x47, 0x41, 0x54, 0x45, 0x57, 0x41, 0x59, 0x10, 0xc2,
- 0x01, 0x2a, 0x4a, 0x0a, 0x10, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x44, 0x69, 0x72, 0x65,
- 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x19, 0x54, 0x52, 0x41, 0x46, 0x46, 0x49, 0x43,
- 0x5f, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f,
- 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x47, 0x52, 0x45, 0x53, 0x53, 0x10,
- 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x45, 0x47, 0x52, 0x45, 0x53, 0x53, 0x10, 0x02, 0x2a, 0x8d, 0x02,
- 0x0a, 0x11, 0x44, 0x65, 0x62, 0x75, 0x67, 0x43, 0x61, 0x70, 0x74, 0x75, 0x72, 0x65, 0x50, 0x6f,
- 0x69, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x19, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55,
- 0x52, 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e,
- 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52,
- 0x45, 0x5f, 0x44, 0x45, 0x4c, 0x49, 0x56, 0x45, 0x52, 0x59, 0x10, 0x04, 0x12, 0x17, 0x0a, 0x13,
- 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x46, 0x52, 0x4f, 0x4d,
- 0x5f, 0x4c, 0x42, 0x10, 0x05, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50,
- 0x54, 0x55, 0x52, 0x45, 0x5f, 0x41, 0x46, 0x54, 0x45, 0x52, 0x5f, 0x56, 0x34, 0x36, 0x10, 0x06,
- 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f,
- 0x41, 0x46, 0x54, 0x45, 0x52, 0x5f, 0x56, 0x36, 0x34, 0x10, 0x07, 0x12, 0x19, 0x0a, 0x15, 0x44,
- 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59,
- 0x5f, 0x50, 0x52, 0x45, 0x10, 0x08, 0x12, 0x1a, 0x0a, 0x16, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41,
- 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x50, 0x4f, 0x53, 0x54,
- 0x10, 0x09, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52,
- 0x45, 0x5f, 0x53, 0x4e, 0x41, 0x54, 0x5f, 0x50, 0x52, 0x45, 0x10, 0x0a, 0x12, 0x19, 0x0a, 0x15,
- 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x53, 0x4e, 0x41, 0x54,
- 0x5f, 0x50, 0x4f, 0x53, 0x54, 0x10, 0x0b, 0x22, 0x04, 0x08, 0x01, 0x10, 0x03, 0x2a, 0x39, 0x0a,
- 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e,
- 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x45, 0x76, 0x65, 0x6e, 0x74,
- 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x52, 0x65, 0x63, 0x6f,
- 0x72, 0x64, 0x4c, 0x6f, 0x73, 0x74, 0x10, 0x02, 0x2a, 0x7f, 0x0a, 0x0f, 0x4c, 0x6f, 0x73, 0x74,
- 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x19, 0x55,
- 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x4c, 0x4f, 0x53, 0x54, 0x5f, 0x45, 0x56, 0x45, 0x4e,
- 0x54, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x50, 0x45,
- 0x52, 0x46, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x42, 0x55,
- 0x46, 0x46, 0x45, 0x52, 0x10, 0x01, 0x12, 0x19, 0x0a, 0x15, 0x4f, 0x42, 0x53, 0x45, 0x52, 0x56,
- 0x45, 0x52, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x53, 0x5f, 0x51, 0x55, 0x45, 0x55, 0x45, 0x10,
- 0x02, 0x12, 0x16, 0x0a, 0x12, 0x48, 0x55, 0x42, 0x42, 0x4c, 0x45, 0x5f, 0x52, 0x49, 0x4e, 0x47,
- 0x5f, 0x42, 0x55, 0x46, 0x46, 0x45, 0x52, 0x10, 0x03, 0x2a, 0xae, 0x02, 0x0a, 0x0e, 0x41, 0x67,
- 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13,
- 0x41, 0x47, 0x45, 0x4e, 0x54, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e,
- 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x41, 0x47, 0x45, 0x4e, 0x54, 0x5f, 0x53,
- 0x54, 0x41, 0x52, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x50, 0x4f, 0x4c, 0x49,
- 0x43, 0x59, 0x5f, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e,
- 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x04,
- 0x12, 0x1f, 0x0a, 0x1b, 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x52, 0x45, 0x47,
- 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x45, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10,
- 0x05, 0x12, 0x1f, 0x0a, 0x1b, 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x52, 0x45,
- 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x45, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45,
- 0x10, 0x06, 0x12, 0x14, 0x0a, 0x10, 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x43,
- 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x07, 0x12, 0x14, 0x0a, 0x10, 0x45, 0x4e, 0x44, 0x50,
- 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x08, 0x12, 0x14,
- 0x0a, 0x10, 0x49, 0x50, 0x43, 0x41, 0x43, 0x48, 0x45, 0x5f, 0x55, 0x50, 0x53, 0x45, 0x52, 0x54,
- 0x45, 0x44, 0x10, 0x09, 0x12, 0x13, 0x0a, 0x0f, 0x49, 0x50, 0x43, 0x41, 0x43, 0x48, 0x45, 0x5f,
- 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x0a, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x45, 0x52,
- 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x50, 0x53, 0x45, 0x52, 0x54, 0x45, 0x44, 0x10, 0x0b, 0x12,
- 0x13, 0x0a, 0x0f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54,
- 0x45, 0x44, 0x10, 0x0c, 0x22, 0x04, 0x08, 0x01, 0x10, 0x01, 0x2a, 0xd8, 0x01, 0x0a, 0x16, 0x53,
- 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x4f, 0x43, 0x4b, 0x5f, 0x58, 0x4c,
- 0x41, 0x54, 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57,
- 0x4e, 0x10, 0x00, 0x12, 0x26, 0x0a, 0x22, 0x53, 0x4f, 0x43, 0x4b, 0x5f, 0x58, 0x4c, 0x41, 0x54,
- 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x50, 0x52, 0x45, 0x5f, 0x44, 0x49, 0x52, 0x45,
- 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x57, 0x44, 0x10, 0x01, 0x12, 0x27, 0x0a, 0x23, 0x53,
+ 0x48, 0x00, 0x52, 0x12, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x12, 0x4b, 0x0a, 0x0f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69,
+ 0x6e, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x68, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x20, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x55,
+ 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x48, 0x00, 0x52, 0x0e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x70, 0x64,
+ 0x61, 0x74, 0x65, 0x12, 0x42, 0x0a, 0x0e, 0x69, 0x70, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x75,
+ 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x69, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c,
+ 0x6f, 0x77, 0x2e, 0x49, 0x50, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0d, 0x69, 0x70, 0x63, 0x61, 0x63, 0x68,
+ 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x48, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x5f, 0x75, 0x70, 0x73, 0x65, 0x72, 0x74, 0x18, 0x6a, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x1f, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70,
+ 0x73, 0x65, 0x72, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x48, 0x00, 0x52, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x73, 0x65, 0x72,
+ 0x74, 0x12, 0x48, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x6c,
+ 0x65, 0x74, 0x65, 0x18, 0x6b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x6f, 0x77,
+ 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f,
+ 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0d, 0x73, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x6e,
+ 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4b, 0x0a, 0x11, 0x41,
+ 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e,
+ 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+ 0x74, 0x79, 0x70, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6e, 0x6f, 0x74, 0x69,
+ 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x42, 0x0a, 0x10, 0x54, 0x69, 0x6d, 0x65,
+ 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x04,
+ 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
+ 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x22, 0x6d, 0x0a, 0x18,
+ 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69,
+ 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65,
+ 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73,
+ 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a,
+ 0x72, 0x75, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x09, 0x72, 0x75, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x59, 0x0a, 0x19, 0x45,
+ 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x65, 0x6e, 0x4e, 0x6f, 0x74, 0x69,
+ 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65,
+ 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73,
+ 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x93, 0x01, 0x0a, 0x1a, 0x45, 0x6e, 0x64, 0x70, 0x6f,
+ 0x69, 0x6e, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x04, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18,
+ 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x14, 0x0a,
+ 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72,
+ 0x72, 0x6f, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c,
+ 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x99, 0x02, 0x0a,
+ 0x13, 0x49, 0x50, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x69, 0x64, 0x72, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x63, 0x69, 0x64, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e,
+ 0x74, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x69, 0x64, 0x65, 0x6e,
+ 0x74, 0x69, 0x74, 0x79, 0x12, 0x3f, 0x0a, 0x0c, 0x6f, 0x6c, 0x64, 0x5f, 0x69, 0x64, 0x65, 0x6e,
+ 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e,
+ 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x6f, 0x6c, 0x64, 0x49, 0x64, 0x65,
+ 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x69, 0x70,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x68, 0x6f, 0x73, 0x74, 0x49, 0x70, 0x12, 0x1e,
+ 0x0a, 0x0b, 0x6f, 0x6c, 0x64, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x69, 0x70, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x09, 0x6f, 0x6c, 0x64, 0x48, 0x6f, 0x73, 0x74, 0x49, 0x70, 0x12, 0x1f,
+ 0x0a, 0x0b, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20,
+ 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x4b, 0x65, 0x79, 0x12,
+ 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x19, 0x0a,
+ 0x08, 0x70, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x07, 0x70, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x43, 0x0a, 0x1d, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72,
+ 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x22, 0x9a, 0x03,
+ 0x0a, 0x19, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x4e,
+ 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x12, 0x4e, 0x0a, 0x10, 0x66,
+ 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x72, 0x52, 0x0f, 0x66, 0x72, 0x6f, 0x6e,
+ 0x74, 0x65, 0x6e, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x50, 0x0a, 0x11, 0x62,
+ 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73,
+ 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x72, 0x52, 0x10, 0x62, 0x61, 0x63,
+ 0x6b, 0x65, 0x6e, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x12, 0x0a,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70,
+ 0x65, 0x12, 0x29, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x70, 0x6f, 0x6c,
+ 0x69, 0x63, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0d, 0x74,
+ 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x12, 0x0a, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x07, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x2c,
+ 0x0a, 0x12, 0x65, 0x78, 0x74, 0x5f, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x70, 0x6f,
+ 0x6c, 0x69, 0x63, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x65, 0x78, 0x74, 0x54,
+ 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2c, 0x0a, 0x12,
+ 0x69, 0x6e, 0x74, 0x5f, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x70, 0x6f, 0x6c, 0x69,
+ 0x63, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x69, 0x6e, 0x74, 0x54, 0x72, 0x61,
+ 0x66, 0x66, 0x69, 0x63, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x2b, 0x0a, 0x19, 0x53, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x22, 0x3c, 0x0a, 0x10, 0x4e, 0x65, 0x74, 0x77, 0x6f,
+ 0x72, 0x6b, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x69,
+ 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65,
+ 0x78, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xef, 0x02, 0x0a, 0x0a, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45,
+ 0x76, 0x65, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45,
+ 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x26,
+ 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e,
+ 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x06,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x30, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c,
+ 0x75, 0x65, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x12, 0x30, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x31,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x61, 0x72, 0x67, 0x31, 0x12, 0x30, 0x0a, 0x04, 0x61, 0x72,
+ 0x67, 0x32, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33,
+ 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x61, 0x72, 0x67, 0x32, 0x12, 0x30, 0x0a, 0x04,
+ 0x61, 0x72, 0x67, 0x33, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e,
+ 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x61, 0x72, 0x67, 0x33, 0x12, 0x18,
+ 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2d, 0x0a, 0x03, 0x63, 0x70, 0x75, 0x18,
+ 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c,
+ 0x75, 0x65, 0x52, 0x03, 0x63, 0x70, 0x75, 0x2a, 0x39, 0x0a, 0x08, 0x46, 0x6c, 0x6f, 0x77, 0x54,
+ 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x0c, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x54,
+ 0x59, 0x50, 0x45, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x4c, 0x33, 0x5f, 0x4c, 0x34, 0x10, 0x01,
+ 0x12, 0x06, 0x0a, 0x02, 0x4c, 0x37, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x4f, 0x43, 0x4b,
+ 0x10, 0x03, 0x2a, 0x39, 0x0a, 0x08, 0x41, 0x75, 0x74, 0x68, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0c,
+ 0x0a, 0x08, 0x44, 0x49, 0x53, 0x41, 0x42, 0x4c, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05,
+ 0x53, 0x50, 0x49, 0x52, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x45, 0x53, 0x54, 0x5f,
+ 0x41, 0x4c, 0x57, 0x41, 0x59, 0x53, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x10, 0x02, 0x2a, 0xea, 0x01,
+ 0x0a, 0x15, 0x54, 0x72, 0x61, 0x63, 0x65, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x4b, 0x4e, 0x4f,
+ 0x57, 0x4e, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x54, 0x4f,
+ 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x54, 0x4f, 0x5f, 0x48,
+ 0x4f, 0x53, 0x54, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x54, 0x4f, 0x5f, 0x53, 0x54, 0x41, 0x43,
+ 0x4b, 0x10, 0x03, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x4f, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x4c, 0x41,
+ 0x59, 0x10, 0x04, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x4f, 0x5f, 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49,
+ 0x4e, 0x54, 0x10, 0x65, 0x12, 0x11, 0x0a, 0x0d, 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x4e, 0x44,
+ 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x05, 0x12, 0x0e, 0x0a, 0x0a, 0x46, 0x52, 0x4f, 0x4d, 0x5f,
+ 0x50, 0x52, 0x4f, 0x58, 0x59, 0x10, 0x06, 0x12, 0x0d, 0x0a, 0x09, 0x46, 0x52, 0x4f, 0x4d, 0x5f,
+ 0x48, 0x4f, 0x53, 0x54, 0x10, 0x07, 0x12, 0x0e, 0x0a, 0x0a, 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x53,
+ 0x54, 0x41, 0x43, 0x4b, 0x10, 0x08, 0x12, 0x10, 0x0a, 0x0c, 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x4f,
+ 0x56, 0x45, 0x52, 0x4c, 0x41, 0x59, 0x10, 0x09, 0x12, 0x10, 0x0a, 0x0c, 0x46, 0x52, 0x4f, 0x4d,
+ 0x5f, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x10, 0x0a, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x4f,
+ 0x5f, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x10, 0x0b, 0x2a, 0x48, 0x0a, 0x0a, 0x4c, 0x37,
+ 0x46, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x12, 0x13, 0x0a, 0x0f, 0x55, 0x4e, 0x4b, 0x4e,
+ 0x4f, 0x57, 0x4e, 0x5f, 0x4c, 0x37, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x00, 0x12, 0x0b, 0x0a,
+ 0x07, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x45,
+ 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x41, 0x4d, 0x50,
+ 0x4c, 0x45, 0x10, 0x03, 0x2a, 0x30, 0x0a, 0x09, 0x49, 0x50, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x12, 0x0f, 0x0a, 0x0b, 0x49, 0x50, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x55, 0x53, 0x45, 0x44,
+ 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x76, 0x34, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04,
+ 0x49, 0x50, 0x76, 0x36, 0x10, 0x02, 0x2a, 0x7c, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x64, 0x69, 0x63,
+ 0x74, 0x12, 0x13, 0x0a, 0x0f, 0x56, 0x45, 0x52, 0x44, 0x49, 0x43, 0x54, 0x5f, 0x55, 0x4e, 0x4b,
+ 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x46, 0x4f, 0x52, 0x57, 0x41, 0x52,
+ 0x44, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x52, 0x4f, 0x50, 0x50, 0x45, 0x44,
+ 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x03, 0x12, 0x09, 0x0a,
+ 0x05, 0x41, 0x55, 0x44, 0x49, 0x54, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x52, 0x45, 0x44, 0x49,
+ 0x52, 0x45, 0x43, 0x54, 0x45, 0x44, 0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x52, 0x41, 0x43,
+ 0x45, 0x44, 0x10, 0x06, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x52, 0x41, 0x4e, 0x53, 0x4c, 0x41, 0x54,
+ 0x45, 0x44, 0x10, 0x07, 0x2a, 0xf0, 0x0f, 0x0a, 0x0a, 0x44, 0x72, 0x6f, 0x70, 0x52, 0x65, 0x61,
+ 0x73, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x13, 0x44, 0x52, 0x4f, 0x50, 0x5f, 0x52, 0x45, 0x41, 0x53,
+ 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x12,
+ 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x4d,
+ 0x41, 0x43, 0x10, 0x82, 0x01, 0x12, 0x1c, 0x0a, 0x17, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44,
+ 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x43,
+ 0x10, 0x83, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x53,
+ 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x49, 0x50, 0x10, 0x84, 0x01, 0x12, 0x12, 0x0a, 0x0d, 0x50,
+ 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x85, 0x01, 0x12,
+ 0x1b, 0x0a, 0x16, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x50, 0x41, 0x43, 0x4b, 0x45,
+ 0x54, 0x5f, 0x44, 0x52, 0x4f, 0x50, 0x50, 0x45, 0x44, 0x10, 0x86, 0x01, 0x12, 0x23, 0x0a, 0x1e,
+ 0x43, 0x54, 0x5f, 0x54, 0x52, 0x55, 0x4e, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x4f, 0x52, 0x5f,
+ 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x87,
+ 0x01, 0x12, 0x1c, 0x0a, 0x17, 0x43, 0x54, 0x5f, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4e, 0x47, 0x5f,
+ 0x54, 0x43, 0x50, 0x5f, 0x41, 0x43, 0x4b, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x88, 0x01, 0x12,
+ 0x1b, 0x0a, 0x16, 0x43, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x4c, 0x34,
+ 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, 0x89, 0x01, 0x12, 0x27, 0x0a, 0x22,
+ 0x43, 0x54, 0x5f, 0x43, 0x41, 0x4e, 0x4e, 0x4f, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45,
+ 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x5f, 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x50, 0x41, 0x43, 0x4b,
+ 0x45, 0x54, 0x10, 0x8a, 0x01, 0x12, 0x1c, 0x0a, 0x17, 0x55, 0x4e, 0x53, 0x55, 0x50, 0x50, 0x4f,
+ 0x52, 0x54, 0x45, 0x44, 0x5f, 0x4c, 0x33, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c,
+ 0x10, 0x8b, 0x01, 0x12, 0x15, 0x0a, 0x10, 0x4d, 0x49, 0x53, 0x53, 0x45, 0x44, 0x5f, 0x54, 0x41,
+ 0x49, 0x4c, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x10, 0x8c, 0x01, 0x12, 0x1c, 0x0a, 0x17, 0x45, 0x52,
+ 0x52, 0x4f, 0x52, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x54, 0x4f, 0x5f, 0x50,
+ 0x41, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x8d, 0x01, 0x12, 0x18, 0x0a, 0x13, 0x55, 0x4e, 0x4b, 0x4e,
+ 0x4f, 0x57, 0x4e, 0x5f, 0x4c, 0x34, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10,
+ 0x8e, 0x01, 0x12, 0x18, 0x0a, 0x13, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x49, 0x43,
+ 0x4d, 0x50, 0x56, 0x34, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x8f, 0x01, 0x12, 0x18, 0x0a, 0x13,
+ 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x56, 0x34, 0x5f, 0x54,
+ 0x59, 0x50, 0x45, 0x10, 0x90, 0x01, 0x12, 0x18, 0x0a, 0x13, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57,
+ 0x4e, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x56, 0x36, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x91, 0x01,
+ 0x12, 0x18, 0x0a, 0x13, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x49, 0x43, 0x4d, 0x50,
+ 0x56, 0x36, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x92, 0x01, 0x12, 0x20, 0x0a, 0x1b, 0x45, 0x52,
+ 0x52, 0x4f, 0x52, 0x5f, 0x52, 0x45, 0x54, 0x52, 0x49, 0x45, 0x56, 0x49, 0x4e, 0x47, 0x5f, 0x54,
+ 0x55, 0x4e, 0x4e, 0x45, 0x4c, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x93, 0x01, 0x12, 0x24, 0x0a, 0x1f,
+ 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x52, 0x45, 0x54, 0x52, 0x49, 0x45, 0x56, 0x49, 0x4e, 0x47,
+ 0x5f, 0x54, 0x55, 0x4e, 0x4e, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x10,
+ 0x94, 0x01, 0x12, 0x1a, 0x0a, 0x15, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x47, 0x45,
+ 0x4e, 0x45, 0x56, 0x45, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x95, 0x01, 0x12, 0x1e,
+ 0x0a, 0x19, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x4c, 0x33, 0x5f, 0x54, 0x41, 0x52,
+ 0x47, 0x45, 0x54, 0x5f, 0x41, 0x44, 0x44, 0x52, 0x45, 0x53, 0x53, 0x10, 0x96, 0x01, 0x12, 0x1b,
+ 0x0a, 0x16, 0x53, 0x54, 0x41, 0x4c, 0x45, 0x5f, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x52, 0x4f, 0x55,
+ 0x54, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x49, 0x50, 0x10, 0x97, 0x01, 0x12, 0x26, 0x0a, 0x21, 0x4e,
+ 0x4f, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x49, 0x4e, 0x47, 0x5f, 0x4c, 0x4f, 0x43, 0x41, 0x4c,
+ 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x45, 0x52, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44,
+ 0x10, 0x98, 0x01, 0x12, 0x27, 0x0a, 0x22, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x57, 0x48, 0x49,
+ 0x4c, 0x45, 0x5f, 0x43, 0x4f, 0x52, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x4c, 0x33,
+ 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x53, 0x55, 0x4d, 0x10, 0x99, 0x01, 0x12, 0x27, 0x0a, 0x22,
+ 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x57, 0x48, 0x49, 0x4c, 0x45, 0x5f, 0x43, 0x4f, 0x52, 0x52,
+ 0x45, 0x43, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x4c, 0x34, 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x53,
+ 0x55, 0x4d, 0x10, 0x9a, 0x01, 0x12, 0x1c, 0x0a, 0x17, 0x43, 0x54, 0x5f, 0x4d, 0x41, 0x50, 0x5f,
+ 0x49, 0x4e, 0x53, 0x45, 0x52, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44,
+ 0x10, 0x9b, 0x01, 0x12, 0x22, 0x0a, 0x1d, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x49,
+ 0x50, 0x56, 0x36, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x48, 0x45,
+ 0x41, 0x44, 0x45, 0x52, 0x10, 0x9c, 0x01, 0x12, 0x23, 0x0a, 0x1e, 0x49, 0x50, 0x5f, 0x46, 0x52,
+ 0x41, 0x47, 0x4d, 0x45, 0x4e, 0x54, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4e, 0x4f, 0x54, 0x5f,
+ 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x10, 0x9d, 0x01, 0x12, 0x1e, 0x0a, 0x19,
+ 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f,
+ 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x9e, 0x01, 0x12, 0x28, 0x0a, 0x23,
+ 0x4e, 0x4f, 0x5f, 0x54, 0x55, 0x4e, 0x4e, 0x45, 0x4c, 0x5f, 0x4f, 0x52, 0x5f, 0x45, 0x4e, 0x43,
+ 0x41, 0x50, 0x53, 0x55, 0x4c, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x45, 0x4e, 0x44, 0x50, 0x4f,
+ 0x49, 0x4e, 0x54, 0x10, 0xa0, 0x01, 0x12, 0x23, 0x0a, 0x1e, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44,
+ 0x5f, 0x54, 0x4f, 0x5f, 0x49, 0x4e, 0x53, 0x45, 0x52, 0x54, 0x5f, 0x49, 0x4e, 0x54, 0x4f, 0x5f,
+ 0x50, 0x52, 0x4f, 0x58, 0x59, 0x4d, 0x41, 0x50, 0x10, 0xa1, 0x01, 0x12, 0x2b, 0x0a, 0x26, 0x52,
+ 0x45, 0x41, 0x43, 0x48, 0x45, 0x44, 0x5f, 0x45, 0x44, 0x54, 0x5f, 0x52, 0x41, 0x54, 0x45, 0x5f,
+ 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x44, 0x52, 0x4f, 0x50, 0x5f, 0x48, 0x4f,
+ 0x52, 0x49, 0x5a, 0x4f, 0x4e, 0x10, 0xa2, 0x01, 0x12, 0x26, 0x0a, 0x21, 0x55, 0x4e, 0x4b, 0x4e,
+ 0x4f, 0x57, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54,
+ 0x52, 0x41, 0x43, 0x4b, 0x49, 0x4e, 0x47, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x10, 0xa3, 0x01,
+ 0x12, 0x1e, 0x0a, 0x19, 0x4c, 0x4f, 0x43, 0x41, 0x4c, 0x5f, 0x48, 0x4f, 0x53, 0x54, 0x5f, 0x49,
+ 0x53, 0x5f, 0x55, 0x4e, 0x52, 0x45, 0x41, 0x43, 0x48, 0x41, 0x42, 0x4c, 0x45, 0x10, 0xa4, 0x01,
+ 0x12, 0x3a, 0x0a, 0x35, 0x4e, 0x4f, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x55, 0x52, 0x41,
+ 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x41, 0x56, 0x41, 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x54,
+ 0x4f, 0x5f, 0x50, 0x45, 0x52, 0x46, 0x4f, 0x52, 0x4d, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59,
+ 0x5f, 0x44, 0x45, 0x43, 0x49, 0x53, 0x49, 0x4f, 0x4e, 0x10, 0xa5, 0x01, 0x12, 0x1c, 0x0a, 0x17,
+ 0x55, 0x4e, 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x5f, 0x4c, 0x32, 0x5f, 0x50,
+ 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, 0xa6, 0x01, 0x12, 0x22, 0x0a, 0x1d, 0x4e, 0x4f,
+ 0x5f, 0x4d, 0x41, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x5f, 0x46, 0x4f, 0x52, 0x5f, 0x4e, 0x41, 0x54,
+ 0x5f, 0x4d, 0x41, 0x53, 0x51, 0x55, 0x45, 0x52, 0x41, 0x44, 0x45, 0x10, 0xa7, 0x01, 0x12, 0x2c,
+ 0x0a, 0x27, 0x55, 0x4e, 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x5f, 0x50, 0x52,
+ 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x46, 0x4f, 0x52, 0x5f, 0x4e, 0x41, 0x54, 0x5f, 0x4d,
+ 0x41, 0x53, 0x51, 0x55, 0x45, 0x52, 0x41, 0x44, 0x45, 0x10, 0xa8, 0x01, 0x12, 0x16, 0x0a, 0x11,
+ 0x46, 0x49, 0x42, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45,
+ 0x44, 0x10, 0xa9, 0x01, 0x12, 0x28, 0x0a, 0x23, 0x45, 0x4e, 0x43, 0x41, 0x50, 0x53, 0x55, 0x4c,
+ 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x52, 0x41, 0x46, 0x46, 0x49, 0x43, 0x5f, 0x49, 0x53,
+ 0x5f, 0x50, 0x52, 0x4f, 0x48, 0x49, 0x42, 0x49, 0x54, 0x45, 0x44, 0x10, 0xaa, 0x01, 0x12, 0x15,
+ 0x0a, 0x10, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x49, 0x44, 0x45, 0x4e, 0x54, 0x49,
+ 0x54, 0x59, 0x10, 0xab, 0x01, 0x12, 0x13, 0x0a, 0x0e, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e,
+ 0x5f, 0x53, 0x45, 0x4e, 0x44, 0x45, 0x52, 0x10, 0xac, 0x01, 0x12, 0x13, 0x0a, 0x0e, 0x4e, 0x41,
+ 0x54, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x4e, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0xad, 0x01, 0x12,
+ 0x13, 0x0a, 0x0e, 0x49, 0x53, 0x5f, 0x41, 0x5f, 0x43, 0x4c, 0x55, 0x53, 0x54, 0x45, 0x52, 0x49,
+ 0x50, 0x10, 0xae, 0x01, 0x12, 0x2e, 0x0a, 0x29, 0x46, 0x49, 0x52, 0x53, 0x54, 0x5f, 0x4c, 0x4f,
+ 0x47, 0x49, 0x43, 0x41, 0x4c, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x47, 0x52, 0x41, 0x4d, 0x5f, 0x46,
+ 0x52, 0x41, 0x47, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e,
+ 0x44, 0x10, 0xaf, 0x01, 0x12, 0x1d, 0x0a, 0x18, 0x46, 0x4f, 0x52, 0x42, 0x49, 0x44, 0x44, 0x45,
+ 0x4e, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x56, 0x36, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45,
+ 0x10, 0xb0, 0x01, 0x12, 0x21, 0x0a, 0x1c, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x5f, 0x42, 0x59,
+ 0x5f, 0x4c, 0x42, 0x5f, 0x53, 0x52, 0x43, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x5f, 0x43, 0x48,
+ 0x45, 0x43, 0x4b, 0x10, 0xb1, 0x01, 0x12, 0x19, 0x0a, 0x14, 0x53, 0x4f, 0x43, 0x4b, 0x45, 0x54,
+ 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0xb2,
+ 0x01, 0x12, 0x19, 0x0a, 0x14, 0x53, 0x4f, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x41, 0x53, 0x53, 0x49,
+ 0x47, 0x4e, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0xb3, 0x01, 0x12, 0x31, 0x0a, 0x2c,
+ 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4f,
+ 0x4e, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x5f,
+ 0x46, 0x4f, 0x52, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, 0xb4, 0x01, 0x12,
+ 0x10, 0x0a, 0x0b, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x44, 0x45, 0x4e, 0x59, 0x10, 0xb5,
+ 0x01, 0x12, 0x12, 0x0a, 0x0d, 0x56, 0x4c, 0x41, 0x4e, 0x5f, 0x46, 0x49, 0x4c, 0x54, 0x45, 0x52,
+ 0x45, 0x44, 0x10, 0xb6, 0x01, 0x12, 0x10, 0x0a, 0x0b, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44,
+ 0x5f, 0x56, 0x4e, 0x49, 0x10, 0xb7, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x49, 0x4e, 0x56, 0x41, 0x4c,
+ 0x49, 0x44, 0x5f, 0x54, 0x43, 0x5f, 0x42, 0x55, 0x46, 0x46, 0x45, 0x52, 0x10, 0xb8, 0x01, 0x12,
+ 0x0b, 0x0a, 0x06, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x10, 0xb9, 0x01, 0x12, 0x17, 0x0a, 0x12,
+ 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4e, 0x47, 0x5f, 0x53, 0x52, 0x56, 0x36, 0x5f, 0x53, 0x54, 0x41,
+ 0x54, 0x45, 0x10, 0xba, 0x01, 0x12, 0x0a, 0x0a, 0x05, 0x4e, 0x41, 0x54, 0x34, 0x36, 0x10, 0xbb,
+ 0x01, 0x12, 0x0a, 0x0a, 0x05, 0x4e, 0x41, 0x54, 0x36, 0x34, 0x10, 0xbc, 0x01, 0x12, 0x12, 0x0a,
+ 0x0d, 0x41, 0x55, 0x54, 0x48, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0xbd,
+ 0x01, 0x12, 0x14, 0x0a, 0x0f, 0x43, 0x54, 0x5f, 0x4e, 0x4f, 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x46,
+ 0x4f, 0x55, 0x4e, 0x44, 0x10, 0xbe, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x53, 0x4e, 0x41, 0x54, 0x5f,
+ 0x4e, 0x4f, 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0xbf, 0x01, 0x12,
+ 0x17, 0x0a, 0x12, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x43, 0x4c, 0x55, 0x53, 0x54,
+ 0x45, 0x52, 0x5f, 0x49, 0x44, 0x10, 0xc0, 0x01, 0x12, 0x27, 0x0a, 0x22, 0x55, 0x4e, 0x53, 0x55,
+ 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c,
+ 0x5f, 0x46, 0x4f, 0x52, 0x5f, 0x44, 0x53, 0x52, 0x5f, 0x45, 0x4e, 0x43, 0x41, 0x50, 0x10, 0xc1,
+ 0x01, 0x12, 0x16, 0x0a, 0x11, 0x4e, 0x4f, 0x5f, 0x45, 0x47, 0x52, 0x45, 0x53, 0x53, 0x5f, 0x47,
+ 0x41, 0x54, 0x45, 0x57, 0x41, 0x59, 0x10, 0xc2, 0x01, 0x12, 0x18, 0x0a, 0x13, 0x55, 0x4e, 0x45,
+ 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x45, 0x44, 0x5f, 0x54, 0x52, 0x41, 0x46, 0x46, 0x49, 0x43,
+ 0x10, 0xc3, 0x01, 0x12, 0x11, 0x0a, 0x0c, 0x54, 0x54, 0x4c, 0x5f, 0x45, 0x58, 0x43, 0x45, 0x45,
+ 0x44, 0x45, 0x44, 0x10, 0xc4, 0x01, 0x12, 0x0f, 0x0a, 0x0a, 0x4e, 0x4f, 0x5f, 0x4e, 0x4f, 0x44,
+ 0x45, 0x5f, 0x49, 0x44, 0x10, 0xc5, 0x01, 0x2a, 0x4a, 0x0a, 0x10, 0x54, 0x72, 0x61, 0x66, 0x66,
+ 0x69, 0x63, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x19, 0x54,
+ 0x52, 0x41, 0x46, 0x46, 0x49, 0x43, 0x5f, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e,
+ 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e,
+ 0x47, 0x52, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x45, 0x47, 0x52, 0x45, 0x53,
+ 0x53, 0x10, 0x02, 0x2a, 0x8d, 0x02, 0x0a, 0x11, 0x44, 0x65, 0x62, 0x75, 0x67, 0x43, 0x61, 0x70,
+ 0x74, 0x75, 0x72, 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x19, 0x44, 0x42, 0x47,
+ 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x55,
+ 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x47, 0x5f,
+ 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x44, 0x45, 0x4c, 0x49, 0x56, 0x45, 0x52, 0x59,
+ 0x10, 0x04, 0x12, 0x17, 0x0a, 0x13, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52,
+ 0x45, 0x5f, 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x4c, 0x42, 0x10, 0x05, 0x12, 0x19, 0x0a, 0x15, 0x44,
+ 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x41, 0x46, 0x54, 0x45, 0x52,
+ 0x5f, 0x56, 0x34, 0x36, 0x10, 0x06, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41,
+ 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x41, 0x46, 0x54, 0x45, 0x52, 0x5f, 0x56, 0x36, 0x34, 0x10,
+ 0x07, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45,
+ 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x50, 0x52, 0x45, 0x10, 0x08, 0x12, 0x1a, 0x0a, 0x16,
+ 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x58,
+ 0x59, 0x5f, 0x50, 0x4f, 0x53, 0x54, 0x10, 0x09, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x47, 0x5f,
+ 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x53, 0x4e, 0x41, 0x54, 0x5f, 0x50, 0x52, 0x45,
+ 0x10, 0x0a, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52,
+ 0x45, 0x5f, 0x53, 0x4e, 0x41, 0x54, 0x5f, 0x50, 0x4f, 0x53, 0x54, 0x10, 0x0b, 0x22, 0x04, 0x08,
+ 0x01, 0x10, 0x03, 0x2a, 0x39, 0x0a, 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65,
+ 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0f, 0x0a,
+ 0x0b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x10, 0x09, 0x12, 0x0e,
+ 0x0a, 0x0a, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x4c, 0x6f, 0x73, 0x74, 0x10, 0x02, 0x2a, 0x7f,
+ 0x0a, 0x0f, 0x4c, 0x6f, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x12, 0x1d, 0x0a, 0x19, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x4c, 0x4f, 0x53,
+ 0x54, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x00,
+ 0x12, 0x1a, 0x0a, 0x16, 0x50, 0x45, 0x52, 0x46, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x52,
+ 0x49, 0x4e, 0x47, 0x5f, 0x42, 0x55, 0x46, 0x46, 0x45, 0x52, 0x10, 0x01, 0x12, 0x19, 0x0a, 0x15,
+ 0x4f, 0x42, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x53, 0x5f,
+ 0x51, 0x55, 0x45, 0x55, 0x45, 0x10, 0x02, 0x12, 0x16, 0x0a, 0x12, 0x48, 0x55, 0x42, 0x42, 0x4c,
+ 0x45, 0x5f, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x42, 0x55, 0x46, 0x46, 0x45, 0x52, 0x10, 0x03, 0x2a,
+ 0xae, 0x02, 0x0a, 0x0e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79,
+ 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x47, 0x45, 0x4e, 0x54, 0x5f, 0x45, 0x56, 0x45, 0x4e,
+ 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x41,
+ 0x47, 0x45, 0x4e, 0x54, 0x5f, 0x53, 0x54, 0x41, 0x52, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x12,
+ 0x0a, 0x0e, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x44,
+ 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x44, 0x45, 0x4c,
+ 0x45, 0x54, 0x45, 0x44, 0x10, 0x04, 0x12, 0x1f, 0x0a, 0x1b, 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49,
+ 0x4e, 0x54, 0x5f, 0x52, 0x45, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x45, 0x5f, 0x53, 0x55,
+ 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x05, 0x12, 0x1f, 0x0a, 0x1b, 0x45, 0x4e, 0x44, 0x50, 0x4f,
+ 0x49, 0x4e, 0x54, 0x5f, 0x52, 0x45, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x45, 0x5f, 0x46,
+ 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x06, 0x12, 0x14, 0x0a, 0x10, 0x45, 0x4e, 0x44, 0x50,
+ 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x07, 0x12, 0x14,
+ 0x0a, 0x10, 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54,
+ 0x45, 0x44, 0x10, 0x08, 0x12, 0x14, 0x0a, 0x10, 0x49, 0x50, 0x43, 0x41, 0x43, 0x48, 0x45, 0x5f,
+ 0x55, 0x50, 0x53, 0x45, 0x52, 0x54, 0x45, 0x44, 0x10, 0x09, 0x12, 0x13, 0x0a, 0x0f, 0x49, 0x50,
+ 0x43, 0x41, 0x43, 0x48, 0x45, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x0a, 0x12,
+ 0x14, 0x0a, 0x10, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x50, 0x53, 0x45, 0x52,
+ 0x54, 0x45, 0x44, 0x10, 0x0b, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45,
+ 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x0c, 0x22, 0x04, 0x08, 0x01, 0x10, 0x01,
+ 0x2a, 0xd8, 0x01, 0x0a, 0x16, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73,
+ 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x18, 0x53,
0x4f, 0x43, 0x4b, 0x5f, 0x58, 0x4c, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f,
- 0x50, 0x4f, 0x53, 0x54, 0x5f, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46,
- 0x57, 0x44, 0x10, 0x02, 0x12, 0x26, 0x0a, 0x22, 0x53, 0x4f, 0x43, 0x4b, 0x5f, 0x58, 0x4c, 0x41,
- 0x54, 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x50, 0x52, 0x45, 0x5f, 0x44, 0x49, 0x52,
- 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x56, 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23,
- 0x53, 0x4f, 0x43, 0x4b, 0x5f, 0x58, 0x4c, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54,
- 0x5f, 0x50, 0x4f, 0x53, 0x54, 0x5f, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
- 0x52, 0x45, 0x56, 0x10, 0x04, 0x2a, 0xdd, 0x0c, 0x0a, 0x0e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45,
- 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x42, 0x47, 0x5f,
- 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12,
- 0x0f, 0x0a, 0x0b, 0x44, 0x42, 0x47, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x49, 0x43, 0x10, 0x01,
- 0x12, 0x16, 0x0a, 0x12, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x4f, 0x43, 0x41, 0x4c, 0x5f, 0x44, 0x45,
- 0x4c, 0x49, 0x56, 0x45, 0x52, 0x59, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x42, 0x47, 0x5f,
- 0x45, 0x4e, 0x43, 0x41, 0x50, 0x10, 0x03, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x42, 0x47, 0x5f, 0x4c,
- 0x58, 0x43, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x42,
- 0x47, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10,
- 0x05, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b,
- 0x55, 0x50, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c,
- 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x52, 0x45, 0x56, 0x10, 0x07, 0x12, 0x10, 0x0a, 0x0c, 0x44,
- 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x08, 0x12, 0x12, 0x0a,
- 0x0e, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10,
- 0x09, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41,
- 0x54, 0x45, 0x44, 0x32, 0x10, 0x0a, 0x12, 0x14, 0x0a, 0x10, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x43,
- 0x4d, 0x50, 0x36, 0x5f, 0x48, 0x41, 0x4e, 0x44, 0x4c, 0x45, 0x10, 0x0b, 0x12, 0x15, 0x0a, 0x11,
- 0x44, 0x42, 0x47, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x36, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53,
- 0x54, 0x10, 0x0c, 0x12, 0x10, 0x0a, 0x0c, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x36,
- 0x5f, 0x4e, 0x53, 0x10, 0x0d, 0x12, 0x1b, 0x0a, 0x17, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x43, 0x4d,
- 0x50, 0x36, 0x5f, 0x54, 0x49, 0x4d, 0x45, 0x5f, 0x45, 0x58, 0x43, 0x45, 0x45, 0x44, 0x45, 0x44,
- 0x10, 0x0e, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x56, 0x45, 0x52,
- 0x44, 0x49, 0x43, 0x54, 0x10, 0x0f, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x42, 0x47, 0x5f, 0x44, 0x45,
- 0x43, 0x41, 0x50, 0x10, 0x10, 0x12, 0x10, 0x0a, 0x0c, 0x44, 0x42, 0x47, 0x5f, 0x50, 0x4f, 0x52,
- 0x54, 0x5f, 0x4d, 0x41, 0x50, 0x10, 0x11, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x42, 0x47, 0x5f, 0x45,
- 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x52, 0x45, 0x54, 0x10, 0x12, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x42,
- 0x47, 0x5f, 0x54, 0x4f, 0x5f, 0x48, 0x4f, 0x53, 0x54, 0x10, 0x13, 0x12, 0x10, 0x0a, 0x0c, 0x44,
- 0x42, 0x47, 0x5f, 0x54, 0x4f, 0x5f, 0x53, 0x54, 0x41, 0x43, 0x4b, 0x10, 0x14, 0x12, 0x10, 0x0a,
- 0x0c, 0x44, 0x42, 0x47, 0x5f, 0x50, 0x4b, 0x54, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x10, 0x15, 0x12,
- 0x1b, 0x0a, 0x17, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55,
- 0x50, 0x5f, 0x46, 0x52, 0x4f, 0x4e, 0x54, 0x45, 0x4e, 0x44, 0x10, 0x16, 0x12, 0x20, 0x0a, 0x1c,
- 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x46,
- 0x52, 0x4f, 0x4e, 0x54, 0x45, 0x4e, 0x44, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x10, 0x17, 0x12, 0x1f,
- 0x0a, 0x1b, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50,
- 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x10, 0x18, 0x12,
- 0x27, 0x0a, 0x23, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55,
- 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x5f, 0x53,
- 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x19, 0x12, 0x27, 0x0a, 0x23, 0x44, 0x42, 0x47, 0x5f,
- 0x4c, 0x42, 0x36, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45,
- 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x5f, 0x56, 0x32, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x10,
- 0x1a, 0x12, 0x1f, 0x0a, 0x1b, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x4c, 0x4f, 0x4f,
- 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x46, 0x41, 0x49, 0x4c,
- 0x10, 0x1b, 0x12, 0x1e, 0x0a, 0x1a, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x52, 0x45,
- 0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x4e, 0x41, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50,
- 0x10, 0x1c, 0x12, 0x17, 0x0a, 0x13, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x52, 0x45,
- 0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x4e, 0x41, 0x54, 0x10, 0x1d, 0x12, 0x1b, 0x0a, 0x17, 0x44,
- 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x46, 0x52,
- 0x4f, 0x4e, 0x54, 0x45, 0x4e, 0x44, 0x10, 0x1e, 0x12, 0x20, 0x0a, 0x1c, 0x44, 0x42, 0x47, 0x5f,
- 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x46, 0x52, 0x4f, 0x4e, 0x54,
- 0x45, 0x4e, 0x44, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x10, 0x1f, 0x12, 0x1f, 0x0a, 0x1b, 0x44, 0x42,
- 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43,
- 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x10, 0x20, 0x12, 0x27, 0x0a, 0x23, 0x44,
- 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41,
- 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45,
- 0x53, 0x53, 0x10, 0x21, 0x12, 0x27, 0x0a, 0x23, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f,
+ 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x26, 0x0a, 0x22, 0x53, 0x4f, 0x43,
+ 0x4b, 0x5f, 0x58, 0x4c, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x50, 0x52,
+ 0x45, 0x5f, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x57, 0x44, 0x10,
+ 0x01, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x4f, 0x43, 0x4b, 0x5f, 0x58, 0x4c, 0x41, 0x54, 0x45, 0x5f,
+ 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x50, 0x4f, 0x53, 0x54, 0x5f, 0x44, 0x49, 0x52, 0x45, 0x43,
+ 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x57, 0x44, 0x10, 0x02, 0x12, 0x26, 0x0a, 0x22, 0x53, 0x4f,
+ 0x43, 0x4b, 0x5f, 0x58, 0x4c, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x50,
+ 0x52, 0x45, 0x5f, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x56,
+ 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x4f, 0x43, 0x4b, 0x5f, 0x58, 0x4c, 0x41, 0x54, 0x45,
+ 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x50, 0x4f, 0x53, 0x54, 0x5f, 0x44, 0x49, 0x52, 0x45,
+ 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x56, 0x10, 0x04, 0x2a, 0x81, 0x0d, 0x0a, 0x0e,
+ 0x44, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x15,
+ 0x0a, 0x11, 0x44, 0x42, 0x47, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e,
+ 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x42, 0x47, 0x5f, 0x47, 0x45, 0x4e,
+ 0x45, 0x52, 0x49, 0x43, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x4f,
+ 0x43, 0x41, 0x4c, 0x5f, 0x44, 0x45, 0x4c, 0x49, 0x56, 0x45, 0x52, 0x59, 0x10, 0x02, 0x12, 0x0d,
+ 0x0a, 0x09, 0x44, 0x42, 0x47, 0x5f, 0x45, 0x4e, 0x43, 0x41, 0x50, 0x10, 0x03, 0x12, 0x11, 0x0a,
+ 0x0d, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x58, 0x43, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x04,
+ 0x12, 0x15, 0x0a, 0x11, 0x44, 0x42, 0x47, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x44,
+ 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x05, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x42, 0x47, 0x5f, 0x43,
+ 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x42,
+ 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x52, 0x45, 0x56, 0x10,
+ 0x07, 0x12, 0x10, 0x0a, 0x0c, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4d, 0x41, 0x54, 0x43,
+ 0x48, 0x10, 0x08, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x43, 0x52,
+ 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x09, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x42, 0x47, 0x5f, 0x43,
+ 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x32, 0x10, 0x0a, 0x12, 0x14, 0x0a, 0x10,
+ 0x44, 0x42, 0x47, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x36, 0x5f, 0x48, 0x41, 0x4e, 0x44, 0x4c, 0x45,
+ 0x10, 0x0b, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x36, 0x5f,
+ 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0x0c, 0x12, 0x10, 0x0a, 0x0c, 0x44, 0x42, 0x47,
+ 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x36, 0x5f, 0x4e, 0x53, 0x10, 0x0d, 0x12, 0x1b, 0x0a, 0x17, 0x44,
+ 0x42, 0x47, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x36, 0x5f, 0x54, 0x49, 0x4d, 0x45, 0x5f, 0x45, 0x58,
+ 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0x0e, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x42, 0x47, 0x5f,
+ 0x43, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x44, 0x49, 0x43, 0x54, 0x10, 0x0f, 0x12, 0x0d, 0x0a, 0x09,
+ 0x44, 0x42, 0x47, 0x5f, 0x44, 0x45, 0x43, 0x41, 0x50, 0x10, 0x10, 0x12, 0x10, 0x0a, 0x0c, 0x44,
+ 0x42, 0x47, 0x5f, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x4d, 0x41, 0x50, 0x10, 0x11, 0x12, 0x11, 0x0a,
+ 0x0d, 0x44, 0x42, 0x47, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x52, 0x45, 0x54, 0x10, 0x12,
+ 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x42, 0x47, 0x5f, 0x54, 0x4f, 0x5f, 0x48, 0x4f, 0x53, 0x54, 0x10,
+ 0x13, 0x12, 0x10, 0x0a, 0x0c, 0x44, 0x42, 0x47, 0x5f, 0x54, 0x4f, 0x5f, 0x53, 0x54, 0x41, 0x43,
+ 0x4b, 0x10, 0x14, 0x12, 0x10, 0x0a, 0x0c, 0x44, 0x42, 0x47, 0x5f, 0x50, 0x4b, 0x54, 0x5f, 0x48,
+ 0x41, 0x53, 0x48, 0x10, 0x15, 0x12, 0x1b, 0x0a, 0x17, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36,
+ 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x46, 0x52, 0x4f, 0x4e, 0x54, 0x45, 0x4e, 0x44,
+ 0x10, 0x16, 0x12, 0x20, 0x0a, 0x1c, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x4c, 0x4f,
+ 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x46, 0x52, 0x4f, 0x4e, 0x54, 0x45, 0x4e, 0x44, 0x5f, 0x46, 0x41,
+ 0x49, 0x4c, 0x10, 0x17, 0x12, 0x1f, 0x0a, 0x1b, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f,
0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53,
- 0x4c, 0x4f, 0x54, 0x5f, 0x56, 0x32, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x10, 0x22, 0x12, 0x1f, 0x0a,
- 0x1b, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f,
- 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x10, 0x23, 0x12, 0x1e,
- 0x0a, 0x1a, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x52, 0x45, 0x56, 0x45, 0x52, 0x53,
- 0x45, 0x5f, 0x4e, 0x41, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x10, 0x24, 0x12, 0x17,
- 0x0a, 0x13, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x52, 0x45, 0x56, 0x45, 0x52, 0x53,
- 0x45, 0x5f, 0x4e, 0x41, 0x54, 0x10, 0x25, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x4c,
- 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x50, 0x42, 0x41, 0x43, 0x4b, 0x5f, 0x53, 0x4e, 0x41, 0x54,
- 0x10, 0x26, 0x12, 0x1d, 0x0a, 0x19, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f,
- 0x4f, 0x50, 0x42, 0x41, 0x43, 0x4b, 0x5f, 0x53, 0x4e, 0x41, 0x54, 0x5f, 0x52, 0x45, 0x56, 0x10,
- 0x27, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b,
- 0x55, 0x50, 0x34, 0x10, 0x28, 0x12, 0x1b, 0x0a, 0x17, 0x44, 0x42, 0x47, 0x5f, 0x52, 0x52, 0x5f,
- 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x5f, 0x53, 0x45, 0x4c,
- 0x10, 0x29, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x47, 0x5f, 0x52, 0x45, 0x56, 0x5f, 0x50, 0x52,
- 0x4f, 0x58, 0x59, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x10, 0x2a, 0x12, 0x17, 0x0a, 0x13,
- 0x44, 0x42, 0x47, 0x5f, 0x52, 0x45, 0x56, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x46, 0x4f,
- 0x55, 0x4e, 0x44, 0x10, 0x2b, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x47, 0x5f, 0x52, 0x45, 0x56,
- 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x10, 0x2c, 0x12,
- 0x11, 0x0a, 0x0d, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x34, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59,
- 0x10, 0x2d, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x4e, 0x45, 0x54, 0x44, 0x45, 0x56,
- 0x5f, 0x49, 0x4e, 0x5f, 0x43, 0x4c, 0x55, 0x53, 0x54, 0x45, 0x52, 0x10, 0x2e, 0x12, 0x15, 0x0a,
- 0x11, 0x44, 0x42, 0x47, 0x5f, 0x4e, 0x45, 0x54, 0x44, 0x45, 0x56, 0x5f, 0x45, 0x4e, 0x43, 0x41,
- 0x50, 0x34, 0x10, 0x2f, 0x12, 0x14, 0x0a, 0x10, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c,
- 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x34, 0x5f, 0x31, 0x10, 0x30, 0x12, 0x14, 0x0a, 0x10, 0x44, 0x42,
- 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x34, 0x5f, 0x32, 0x10, 0x31,
- 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54,
- 0x45, 0x44, 0x34, 0x10, 0x32, 0x12, 0x14, 0x0a, 0x10, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f,
- 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x36, 0x5f, 0x31, 0x10, 0x33, 0x12, 0x14, 0x0a, 0x10, 0x44,
- 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x36, 0x5f, 0x32, 0x10,
- 0x34, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41,
- 0x54, 0x45, 0x44, 0x36, 0x10, 0x35, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x42, 0x47, 0x5f, 0x53, 0x4b,
- 0x49, 0x50, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x10, 0x36, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x42,
- 0x47, 0x5f, 0x4c, 0x34, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, 0x37, 0x12, 0x19, 0x0a,
- 0x15, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x50, 0x5f, 0x49, 0x44, 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x46,
- 0x41, 0x49, 0x4c, 0x45, 0x44, 0x34, 0x10, 0x38, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f,
- 0x49, 0x50, 0x5f, 0x49, 0x44, 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44,
- 0x36, 0x10, 0x39, 0x12, 0x1a, 0x0a, 0x16, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x50, 0x5f, 0x49, 0x44,
- 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x45, 0x44, 0x34, 0x10, 0x3a, 0x12,
- 0x1a, 0x0a, 0x16, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x50, 0x5f, 0x49, 0x44, 0x5f, 0x4d, 0x41, 0x50,
- 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x45, 0x44, 0x36, 0x10, 0x3b, 0x12, 0x13, 0x0a, 0x0f, 0x44,
- 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x5f, 0x53, 0x54, 0x41, 0x4c, 0x45, 0x5f, 0x43, 0x54, 0x10, 0x3c,
- 0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x4e, 0x48, 0x45, 0x52, 0x49, 0x54, 0x5f,
- 0x49, 0x44, 0x45, 0x4e, 0x54, 0x49, 0x54, 0x59, 0x10, 0x3d, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x42,
- 0x47, 0x5f, 0x53, 0x4b, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x34, 0x10, 0x3e, 0x12, 0x12,
- 0x0a, 0x0e, 0x44, 0x42, 0x47, 0x5f, 0x53, 0x4b, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x36,
- 0x10, 0x3f, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x42, 0x47, 0x5f, 0x53, 0x4b, 0x5f, 0x41, 0x53, 0x53,
- 0x49, 0x47, 0x4e, 0x10, 0x40, 0x42, 0x26, 0x5a, 0x24, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
- 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75,
- 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x66, 0x6c, 0x6f, 0x77, 0x62, 0x06, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x4c, 0x4f, 0x54, 0x10, 0x18, 0x12, 0x27, 0x0a, 0x23, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36,
+ 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f,
+ 0x53, 0x4c, 0x4f, 0x54, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x19, 0x12, 0x27,
+ 0x0a, 0x23, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50,
+ 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x5f, 0x56, 0x32,
+ 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x10, 0x1a, 0x12, 0x1f, 0x0a, 0x1b, 0x44, 0x42, 0x47, 0x5f, 0x4c,
+ 0x42, 0x36, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e,
+ 0x44, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x10, 0x1b, 0x12, 0x1e, 0x0a, 0x1a, 0x44, 0x42, 0x47, 0x5f,
+ 0x4c, 0x42, 0x36, 0x5f, 0x52, 0x45, 0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x4e, 0x41, 0x54, 0x5f,
+ 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x10, 0x1c, 0x12, 0x17, 0x0a, 0x13, 0x44, 0x42, 0x47, 0x5f,
+ 0x4c, 0x42, 0x36, 0x5f, 0x52, 0x45, 0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x4e, 0x41, 0x54, 0x10,
+ 0x1d, 0x12, 0x1b, 0x0a, 0x17, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f,
+ 0x4b, 0x55, 0x50, 0x5f, 0x46, 0x52, 0x4f, 0x4e, 0x54, 0x45, 0x4e, 0x44, 0x10, 0x1e, 0x12, 0x20,
+ 0x0a, 0x1c, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50,
+ 0x5f, 0x46, 0x52, 0x4f, 0x4e, 0x54, 0x45, 0x4e, 0x44, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x10, 0x1f,
+ 0x12, 0x1f, 0x0a, 0x1b, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b,
+ 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x10,
+ 0x20, 0x12, 0x27, 0x0a, 0x23, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f,
+ 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54,
+ 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x21, 0x12, 0x27, 0x0a, 0x23, 0x44, 0x42,
+ 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43,
+ 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x5f, 0x56, 0x32, 0x5f, 0x46, 0x41, 0x49,
+ 0x4c, 0x10, 0x22, 0x12, 0x1f, 0x0a, 0x1b, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c,
+ 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x46, 0x41,
+ 0x49, 0x4c, 0x10, 0x23, 0x12, 0x1e, 0x0a, 0x1a, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f,
+ 0x52, 0x45, 0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x4e, 0x41, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b,
+ 0x55, 0x50, 0x10, 0x24, 0x12, 0x17, 0x0a, 0x13, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f,
+ 0x52, 0x45, 0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x4e, 0x41, 0x54, 0x10, 0x25, 0x12, 0x19, 0x0a,
+ 0x15, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x50, 0x42, 0x41, 0x43,
+ 0x4b, 0x5f, 0x53, 0x4e, 0x41, 0x54, 0x10, 0x26, 0x12, 0x1d, 0x0a, 0x19, 0x44, 0x42, 0x47, 0x5f,
+ 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x50, 0x42, 0x41, 0x43, 0x4b, 0x5f, 0x53, 0x4e, 0x41,
+ 0x54, 0x5f, 0x52, 0x45, 0x56, 0x10, 0x27, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x42, 0x47, 0x5f, 0x43,
+ 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x34, 0x10, 0x28, 0x12, 0x1b, 0x0a, 0x17, 0x44,
+ 0x42, 0x47, 0x5f, 0x52, 0x52, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c,
+ 0x4f, 0x54, 0x5f, 0x53, 0x45, 0x4c, 0x10, 0x29, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x47, 0x5f,
+ 0x52, 0x45, 0x56, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50,
+ 0x10, 0x2a, 0x12, 0x17, 0x0a, 0x13, 0x44, 0x42, 0x47, 0x5f, 0x52, 0x45, 0x56, 0x5f, 0x50, 0x52,
+ 0x4f, 0x58, 0x59, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x2b, 0x12, 0x18, 0x0a, 0x14, 0x44,
+ 0x42, 0x47, 0x5f, 0x52, 0x45, 0x56, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x55, 0x50, 0x44,
+ 0x41, 0x54, 0x45, 0x10, 0x2c, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x34, 0x5f,
+ 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x10, 0x2d, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f,
+ 0x4e, 0x45, 0x54, 0x44, 0x45, 0x56, 0x5f, 0x49, 0x4e, 0x5f, 0x43, 0x4c, 0x55, 0x53, 0x54, 0x45,
+ 0x52, 0x10, 0x2e, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x42, 0x47, 0x5f, 0x4e, 0x45, 0x54, 0x44, 0x45,
+ 0x56, 0x5f, 0x45, 0x4e, 0x43, 0x41, 0x50, 0x34, 0x10, 0x2f, 0x12, 0x14, 0x0a, 0x10, 0x44, 0x42,
+ 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x34, 0x5f, 0x31, 0x10, 0x30,
+ 0x12, 0x14, 0x0a, 0x10, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55,
+ 0x50, 0x34, 0x5f, 0x32, 0x10, 0x31, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54,
+ 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x34, 0x10, 0x32, 0x12, 0x14, 0x0a, 0x10, 0x44,
+ 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x36, 0x5f, 0x31, 0x10,
+ 0x33, 0x12, 0x14, 0x0a, 0x10, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b,
+ 0x55, 0x50, 0x36, 0x5f, 0x32, 0x10, 0x34, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x42, 0x47, 0x5f, 0x43,
+ 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x36, 0x10, 0x35, 0x12, 0x12, 0x0a, 0x0e,
+ 0x44, 0x42, 0x47, 0x5f, 0x53, 0x4b, 0x49, 0x50, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x10, 0x36,
+ 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x34, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54,
+ 0x45, 0x10, 0x37, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x50, 0x5f, 0x49, 0x44,
+ 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x34, 0x10, 0x38, 0x12, 0x19,
+ 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x50, 0x5f, 0x49, 0x44, 0x5f, 0x4d, 0x41, 0x50, 0x5f,
+ 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x36, 0x10, 0x39, 0x12, 0x1a, 0x0a, 0x16, 0x44, 0x42, 0x47,
+ 0x5f, 0x49, 0x50, 0x5f, 0x49, 0x44, 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45,
+ 0x45, 0x44, 0x34, 0x10, 0x3a, 0x12, 0x1a, 0x0a, 0x16, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x50, 0x5f,
+ 0x49, 0x44, 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x45, 0x44, 0x36, 0x10,
+ 0x3b, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x5f, 0x53, 0x54, 0x41, 0x4c,
+ 0x45, 0x5f, 0x43, 0x54, 0x10, 0x3c, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x4e,
+ 0x48, 0x45, 0x52, 0x49, 0x54, 0x5f, 0x49, 0x44, 0x45, 0x4e, 0x54, 0x49, 0x54, 0x59, 0x10, 0x3d,
+ 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x42, 0x47, 0x5f, 0x53, 0x4b, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55,
+ 0x50, 0x34, 0x10, 0x3e, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x42, 0x47, 0x5f, 0x53, 0x4b, 0x5f, 0x4c,
+ 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x36, 0x10, 0x3f, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x42, 0x47, 0x5f,
+ 0x53, 0x4b, 0x5f, 0x41, 0x53, 0x53, 0x49, 0x47, 0x4e, 0x10, 0x40, 0x12, 0x0d, 0x0a, 0x09, 0x44,
+ 0x42, 0x47, 0x5f, 0x4c, 0x37, 0x5f, 0x4c, 0x42, 0x10, 0x41, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x42,
+ 0x47, 0x5f, 0x53, 0x4b, 0x49, 0x50, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x10, 0x42, 0x42,
+ 0x26, 0x5a, 0x24, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x69,
+ 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f,
+ 0x76, 0x31, 0x2f, 0x66, 0x6c, 0x6f, 0x77, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -5310,7 +5451,7 @@ func file_flow_flow_proto_rawDescGZIP() []byte {
}
var file_flow_flow_proto_enumTypes = make([]protoimpl.EnumInfo, 14)
-var file_flow_flow_proto_msgTypes = make([]protoimpl.MessageInfo, 36)
+var file_flow_flow_proto_msgTypes = make([]protoimpl.MessageInfo, 37)
var file_flow_flow_proto_goTypes = []interface{}{
(FlowType)(0), // 0: flow.FlowType
(AuthType)(0), // 1: flow.AuthType
@@ -5341,34 +5482,36 @@ var file_flow_flow_proto_goTypes = []interface{}{
(*SCTP)(nil), // 26: flow.SCTP
(*ICMPv4)(nil), // 27: flow.ICMPv4
(*ICMPv6)(nil), // 28: flow.ICMPv6
- (*EventTypeFilter)(nil), // 29: flow.EventTypeFilter
- (*CiliumEventType)(nil), // 30: flow.CiliumEventType
- (*FlowFilter)(nil), // 31: flow.FlowFilter
- (*DNS)(nil), // 32: flow.DNS
- (*HTTPHeader)(nil), // 33: flow.HTTPHeader
- (*HTTP)(nil), // 34: flow.HTTP
- (*Kafka)(nil), // 35: flow.Kafka
- (*Service)(nil), // 36: flow.Service
- (*LostEvent)(nil), // 37: flow.LostEvent
- (*AgentEvent)(nil), // 38: flow.AgentEvent
- (*AgentEventUnknown)(nil), // 39: flow.AgentEventUnknown
- (*TimeNotification)(nil), // 40: flow.TimeNotification
- (*PolicyUpdateNotification)(nil), // 41: flow.PolicyUpdateNotification
- (*EndpointRegenNotification)(nil), // 42: flow.EndpointRegenNotification
- (*EndpointUpdateNotification)(nil), // 43: flow.EndpointUpdateNotification
- (*IPCacheNotification)(nil), // 44: flow.IPCacheNotification
- (*ServiceUpsertNotificationAddr)(nil), // 45: flow.ServiceUpsertNotificationAddr
- (*ServiceUpsertNotification)(nil), // 46: flow.ServiceUpsertNotification
- (*ServiceDeleteNotification)(nil), // 47: flow.ServiceDeleteNotification
- (*NetworkInterface)(nil), // 48: flow.NetworkInterface
- (*DebugEvent)(nil), // 49: flow.DebugEvent
- (*timestamppb.Timestamp)(nil), // 50: google.protobuf.Timestamp
- (*wrapperspb.BoolValue)(nil), // 51: google.protobuf.BoolValue
- (*wrapperspb.Int32Value)(nil), // 52: google.protobuf.Int32Value
- (*wrapperspb.UInt32Value)(nil), // 53: google.protobuf.UInt32Value
+ (*Policy)(nil), // 29: flow.Policy
+ (*EventTypeFilter)(nil), // 30: flow.EventTypeFilter
+ (*CiliumEventType)(nil), // 31: flow.CiliumEventType
+ (*FlowFilter)(nil), // 32: flow.FlowFilter
+ (*DNS)(nil), // 33: flow.DNS
+ (*HTTPHeader)(nil), // 34: flow.HTTPHeader
+ (*HTTP)(nil), // 35: flow.HTTP
+ (*Kafka)(nil), // 36: flow.Kafka
+ (*Service)(nil), // 37: flow.Service
+ (*LostEvent)(nil), // 38: flow.LostEvent
+ (*AgentEvent)(nil), // 39: flow.AgentEvent
+ (*AgentEventUnknown)(nil), // 40: flow.AgentEventUnknown
+ (*TimeNotification)(nil), // 41: flow.TimeNotification
+ (*PolicyUpdateNotification)(nil), // 42: flow.PolicyUpdateNotification
+ (*EndpointRegenNotification)(nil), // 43: flow.EndpointRegenNotification
+ (*EndpointUpdateNotification)(nil), // 44: flow.EndpointUpdateNotification
+ (*IPCacheNotification)(nil), // 45: flow.IPCacheNotification
+ (*ServiceUpsertNotificationAddr)(nil), // 46: flow.ServiceUpsertNotificationAddr
+ (*ServiceUpsertNotification)(nil), // 47: flow.ServiceUpsertNotification
+ (*ServiceDeleteNotification)(nil), // 48: flow.ServiceDeleteNotification
+ (*NetworkInterface)(nil), // 49: flow.NetworkInterface
+ (*DebugEvent)(nil), // 50: flow.DebugEvent
+ (*timestamppb.Timestamp)(nil), // 51: google.protobuf.Timestamp
+ (*wrapperspb.BoolValue)(nil), // 52: google.protobuf.BoolValue
+ (*anypb.Any)(nil), // 53: google.protobuf.Any
+ (*wrapperspb.Int32Value)(nil), // 54: google.protobuf.Int32Value
+ (*wrapperspb.UInt32Value)(nil), // 55: google.protobuf.UInt32Value
}
var file_flow_flow_proto_depIdxs = []int32{
- 50, // 0: flow.Flow.time:type_name -> google.protobuf.Timestamp
+ 51, // 0: flow.Flow.time:type_name -> google.protobuf.Timestamp
5, // 1: flow.Flow.verdict:type_name -> flow.Verdict
1, // 2: flow.Flow.auth_type:type_name -> flow.AuthType
23, // 3: flow.Flow.ethernet:type_name -> flow.Ethernet
@@ -5378,65 +5521,68 @@ var file_flow_flow_proto_depIdxs = []int32{
19, // 7: flow.Flow.destination:type_name -> flow.Endpoint
0, // 8: flow.Flow.Type:type_name -> flow.FlowType
16, // 9: flow.Flow.l7:type_name -> flow.Layer7
- 30, // 10: flow.Flow.event_type:type_name -> flow.CiliumEventType
- 36, // 11: flow.Flow.source_service:type_name -> flow.Service
- 36, // 12: flow.Flow.destination_service:type_name -> flow.Service
+ 31, // 10: flow.Flow.event_type:type_name -> flow.CiliumEventType
+ 37, // 11: flow.Flow.source_service:type_name -> flow.Service
+ 37, // 12: flow.Flow.destination_service:type_name -> flow.Service
7, // 13: flow.Flow.traffic_direction:type_name -> flow.TrafficDirection
2, // 14: flow.Flow.trace_observation_point:type_name -> flow.TraceObservationPoint
6, // 15: flow.Flow.drop_reason_desc:type_name -> flow.DropReason
- 51, // 16: flow.Flow.is_reply:type_name -> google.protobuf.BoolValue
+ 52, // 16: flow.Flow.is_reply:type_name -> google.protobuf.BoolValue
8, // 17: flow.Flow.debug_capture_point:type_name -> flow.DebugCapturePoint
- 48, // 18: flow.Flow.interface:type_name -> flow.NetworkInterface
+ 49, // 18: flow.Flow.interface:type_name -> flow.NetworkInterface
17, // 19: flow.Flow.trace_context:type_name -> flow.TraceContext
12, // 20: flow.Flow.sock_xlate_point:type_name -> flow.SocketTranslationPoint
- 21, // 21: flow.Layer4.TCP:type_name -> flow.TCP
- 25, // 22: flow.Layer4.UDP:type_name -> flow.UDP
- 27, // 23: flow.Layer4.ICMPv4:type_name -> flow.ICMPv4
- 28, // 24: flow.Layer4.ICMPv6:type_name -> flow.ICMPv6
- 26, // 25: flow.Layer4.SCTP:type_name -> flow.SCTP
- 3, // 26: flow.Layer7.type:type_name -> flow.L7FlowType
- 32, // 27: flow.Layer7.dns:type_name -> flow.DNS
- 34, // 28: flow.Layer7.http:type_name -> flow.HTTP
- 35, // 29: flow.Layer7.kafka:type_name -> flow.Kafka
- 18, // 30: flow.TraceContext.parent:type_name -> flow.TraceParent
- 20, // 31: flow.Endpoint.workloads:type_name -> flow.Workload
- 24, // 32: flow.TCP.flags:type_name -> flow.TCPFlags
- 4, // 33: flow.IP.ipVersion:type_name -> flow.IPVersion
- 20, // 34: flow.FlowFilter.source_workload:type_name -> flow.Workload
- 20, // 35: flow.FlowFilter.destination_workload:type_name -> flow.Workload
- 7, // 36: flow.FlowFilter.traffic_direction:type_name -> flow.TrafficDirection
- 5, // 37: flow.FlowFilter.verdict:type_name -> flow.Verdict
- 29, // 38: flow.FlowFilter.event_type:type_name -> flow.EventTypeFilter
- 24, // 39: flow.FlowFilter.tcp_flags:type_name -> flow.TCPFlags
- 4, // 40: flow.FlowFilter.ip_version:type_name -> flow.IPVersion
- 33, // 41: flow.HTTP.headers:type_name -> flow.HTTPHeader
- 10, // 42: flow.LostEvent.source:type_name -> flow.LostEventSource
- 52, // 43: flow.LostEvent.cpu:type_name -> google.protobuf.Int32Value
- 11, // 44: flow.AgentEvent.type:type_name -> flow.AgentEventType
- 39, // 45: flow.AgentEvent.unknown:type_name -> flow.AgentEventUnknown
- 40, // 46: flow.AgentEvent.agent_start:type_name -> flow.TimeNotification
- 41, // 47: flow.AgentEvent.policy_update:type_name -> flow.PolicyUpdateNotification
- 42, // 48: flow.AgentEvent.endpoint_regenerate:type_name -> flow.EndpointRegenNotification
- 43, // 49: flow.AgentEvent.endpoint_update:type_name -> flow.EndpointUpdateNotification
- 44, // 50: flow.AgentEvent.ipcache_update:type_name -> flow.IPCacheNotification
- 46, // 51: flow.AgentEvent.service_upsert:type_name -> flow.ServiceUpsertNotification
- 47, // 52: flow.AgentEvent.service_delete:type_name -> flow.ServiceDeleteNotification
- 50, // 53: flow.TimeNotification.time:type_name -> google.protobuf.Timestamp
- 53, // 54: flow.IPCacheNotification.old_identity:type_name -> google.protobuf.UInt32Value
- 45, // 55: flow.ServiceUpsertNotification.frontend_address:type_name -> flow.ServiceUpsertNotificationAddr
- 45, // 56: flow.ServiceUpsertNotification.backend_addresses:type_name -> flow.ServiceUpsertNotificationAddr
- 13, // 57: flow.DebugEvent.type:type_name -> flow.DebugEventType
- 19, // 58: flow.DebugEvent.source:type_name -> flow.Endpoint
- 53, // 59: flow.DebugEvent.hash:type_name -> google.protobuf.UInt32Value
- 53, // 60: flow.DebugEvent.arg1:type_name -> google.protobuf.UInt32Value
- 53, // 61: flow.DebugEvent.arg2:type_name -> google.protobuf.UInt32Value
- 53, // 62: flow.DebugEvent.arg3:type_name -> google.protobuf.UInt32Value
- 52, // 63: flow.DebugEvent.cpu:type_name -> google.protobuf.Int32Value
- 64, // [64:64] is the sub-list for method output_type
- 64, // [64:64] is the sub-list for method input_type
- 64, // [64:64] is the sub-list for extension type_name
- 64, // [64:64] is the sub-list for extension extendee
- 0, // [0:64] is the sub-list for field type_name
+ 53, // 21: flow.Flow.extensions:type_name -> google.protobuf.Any
+ 29, // 22: flow.Flow.egress_allowed_by:type_name -> flow.Policy
+ 29, // 23: flow.Flow.ingress_allowed_by:type_name -> flow.Policy
+ 21, // 24: flow.Layer4.TCP:type_name -> flow.TCP
+ 25, // 25: flow.Layer4.UDP:type_name -> flow.UDP
+ 27, // 26: flow.Layer4.ICMPv4:type_name -> flow.ICMPv4
+ 28, // 27: flow.Layer4.ICMPv6:type_name -> flow.ICMPv6
+ 26, // 28: flow.Layer4.SCTP:type_name -> flow.SCTP
+ 3, // 29: flow.Layer7.type:type_name -> flow.L7FlowType
+ 33, // 30: flow.Layer7.dns:type_name -> flow.DNS
+ 35, // 31: flow.Layer7.http:type_name -> flow.HTTP
+ 36, // 32: flow.Layer7.kafka:type_name -> flow.Kafka
+ 18, // 33: flow.TraceContext.parent:type_name -> flow.TraceParent
+ 20, // 34: flow.Endpoint.workloads:type_name -> flow.Workload
+ 24, // 35: flow.TCP.flags:type_name -> flow.TCPFlags
+ 4, // 36: flow.IP.ipVersion:type_name -> flow.IPVersion
+ 20, // 37: flow.FlowFilter.source_workload:type_name -> flow.Workload
+ 20, // 38: flow.FlowFilter.destination_workload:type_name -> flow.Workload
+ 7, // 39: flow.FlowFilter.traffic_direction:type_name -> flow.TrafficDirection
+ 5, // 40: flow.FlowFilter.verdict:type_name -> flow.Verdict
+ 30, // 41: flow.FlowFilter.event_type:type_name -> flow.EventTypeFilter
+ 24, // 42: flow.FlowFilter.tcp_flags:type_name -> flow.TCPFlags
+ 4, // 43: flow.FlowFilter.ip_version:type_name -> flow.IPVersion
+ 34, // 44: flow.HTTP.headers:type_name -> flow.HTTPHeader
+ 10, // 45: flow.LostEvent.source:type_name -> flow.LostEventSource
+ 54, // 46: flow.LostEvent.cpu:type_name -> google.protobuf.Int32Value
+ 11, // 47: flow.AgentEvent.type:type_name -> flow.AgentEventType
+ 40, // 48: flow.AgentEvent.unknown:type_name -> flow.AgentEventUnknown
+ 41, // 49: flow.AgentEvent.agent_start:type_name -> flow.TimeNotification
+ 42, // 50: flow.AgentEvent.policy_update:type_name -> flow.PolicyUpdateNotification
+ 43, // 51: flow.AgentEvent.endpoint_regenerate:type_name -> flow.EndpointRegenNotification
+ 44, // 52: flow.AgentEvent.endpoint_update:type_name -> flow.EndpointUpdateNotification
+ 45, // 53: flow.AgentEvent.ipcache_update:type_name -> flow.IPCacheNotification
+ 47, // 54: flow.AgentEvent.service_upsert:type_name -> flow.ServiceUpsertNotification
+ 48, // 55: flow.AgentEvent.service_delete:type_name -> flow.ServiceDeleteNotification
+ 51, // 56: flow.TimeNotification.time:type_name -> google.protobuf.Timestamp
+ 55, // 57: flow.IPCacheNotification.old_identity:type_name -> google.protobuf.UInt32Value
+ 46, // 58: flow.ServiceUpsertNotification.frontend_address:type_name -> flow.ServiceUpsertNotificationAddr
+ 46, // 59: flow.ServiceUpsertNotification.backend_addresses:type_name -> flow.ServiceUpsertNotificationAddr
+ 13, // 60: flow.DebugEvent.type:type_name -> flow.DebugEventType
+ 19, // 61: flow.DebugEvent.source:type_name -> flow.Endpoint
+ 55, // 62: flow.DebugEvent.hash:type_name -> google.protobuf.UInt32Value
+ 55, // 63: flow.DebugEvent.arg1:type_name -> google.protobuf.UInt32Value
+ 55, // 64: flow.DebugEvent.arg2:type_name -> google.protobuf.UInt32Value
+ 55, // 65: flow.DebugEvent.arg3:type_name -> google.protobuf.UInt32Value
+ 54, // 66: flow.DebugEvent.cpu:type_name -> google.protobuf.Int32Value
+ 67, // [67:67] is the sub-list for method output_type
+ 67, // [67:67] is the sub-list for method input_type
+ 67, // [67:67] is the sub-list for extension type_name
+ 67, // [67:67] is the sub-list for extension extendee
+ 0, // [0:67] is the sub-list for field type_name
}
func init() { file_flow_flow_proto_init() }
@@ -5626,7 +5772,7 @@ func file_flow_flow_proto_init() {
}
}
file_flow_flow_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*EventTypeFilter); i {
+ switch v := v.(*Policy); i {
case 0:
return &v.state
case 1:
@@ -5638,7 +5784,7 @@ func file_flow_flow_proto_init() {
}
}
file_flow_flow_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CiliumEventType); i {
+ switch v := v.(*EventTypeFilter); i {
case 0:
return &v.state
case 1:
@@ -5650,7 +5796,7 @@ func file_flow_flow_proto_init() {
}
}
file_flow_flow_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*FlowFilter); i {
+ switch v := v.(*CiliumEventType); i {
case 0:
return &v.state
case 1:
@@ -5662,7 +5808,7 @@ func file_flow_flow_proto_init() {
}
}
file_flow_flow_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DNS); i {
+ switch v := v.(*FlowFilter); i {
case 0:
return &v.state
case 1:
@@ -5674,7 +5820,7 @@ func file_flow_flow_proto_init() {
}
}
file_flow_flow_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HTTPHeader); i {
+ switch v := v.(*DNS); i {
case 0:
return &v.state
case 1:
@@ -5686,7 +5832,7 @@ func file_flow_flow_proto_init() {
}
}
file_flow_flow_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HTTP); i {
+ switch v := v.(*HTTPHeader); i {
case 0:
return &v.state
case 1:
@@ -5698,7 +5844,7 @@ func file_flow_flow_proto_init() {
}
}
file_flow_flow_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Kafka); i {
+ switch v := v.(*HTTP); i {
case 0:
return &v.state
case 1:
@@ -5710,7 +5856,7 @@ func file_flow_flow_proto_init() {
}
}
file_flow_flow_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Service); i {
+ switch v := v.(*Kafka); i {
case 0:
return &v.state
case 1:
@@ -5722,7 +5868,7 @@ func file_flow_flow_proto_init() {
}
}
file_flow_flow_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*LostEvent); i {
+ switch v := v.(*Service); i {
case 0:
return &v.state
case 1:
@@ -5734,7 +5880,7 @@ func file_flow_flow_proto_init() {
}
}
file_flow_flow_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AgentEvent); i {
+ switch v := v.(*LostEvent); i {
case 0:
return &v.state
case 1:
@@ -5746,7 +5892,7 @@ func file_flow_flow_proto_init() {
}
}
file_flow_flow_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AgentEventUnknown); i {
+ switch v := v.(*AgentEvent); i {
case 0:
return &v.state
case 1:
@@ -5758,7 +5904,7 @@ func file_flow_flow_proto_init() {
}
}
file_flow_flow_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*TimeNotification); i {
+ switch v := v.(*AgentEventUnknown); i {
case 0:
return &v.state
case 1:
@@ -5770,7 +5916,7 @@ func file_flow_flow_proto_init() {
}
}
file_flow_flow_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PolicyUpdateNotification); i {
+ switch v := v.(*TimeNotification); i {
case 0:
return &v.state
case 1:
@@ -5782,7 +5928,7 @@ func file_flow_flow_proto_init() {
}
}
file_flow_flow_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*EndpointRegenNotification); i {
+ switch v := v.(*PolicyUpdateNotification); i {
case 0:
return &v.state
case 1:
@@ -5794,7 +5940,7 @@ func file_flow_flow_proto_init() {
}
}
file_flow_flow_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*EndpointUpdateNotification); i {
+ switch v := v.(*EndpointRegenNotification); i {
case 0:
return &v.state
case 1:
@@ -5806,7 +5952,7 @@ func file_flow_flow_proto_init() {
}
}
file_flow_flow_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*IPCacheNotification); i {
+ switch v := v.(*EndpointUpdateNotification); i {
case 0:
return &v.state
case 1:
@@ -5818,7 +5964,7 @@ func file_flow_flow_proto_init() {
}
}
file_flow_flow_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ServiceUpsertNotificationAddr); i {
+ switch v := v.(*IPCacheNotification); i {
case 0:
return &v.state
case 1:
@@ -5830,7 +5976,7 @@ func file_flow_flow_proto_init() {
}
}
file_flow_flow_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ServiceUpsertNotification); i {
+ switch v := v.(*ServiceUpsertNotificationAddr); i {
case 0:
return &v.state
case 1:
@@ -5842,7 +5988,7 @@ func file_flow_flow_proto_init() {
}
}
file_flow_flow_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ServiceDeleteNotification); i {
+ switch v := v.(*ServiceUpsertNotification); i {
case 0:
return &v.state
case 1:
@@ -5854,7 +6000,7 @@ func file_flow_flow_proto_init() {
}
}
file_flow_flow_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NetworkInterface); i {
+ switch v := v.(*ServiceDeleteNotification); i {
case 0:
return &v.state
case 1:
@@ -5866,6 +6012,18 @@ func file_flow_flow_proto_init() {
}
}
file_flow_flow_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NetworkInterface); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_flow_flow_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DebugEvent); i {
case 0:
return &v.state
@@ -5890,7 +6048,7 @@ func file_flow_flow_proto_init() {
(*Layer7_Http)(nil),
(*Layer7_Kafka)(nil),
}
- file_flow_flow_proto_msgTypes[24].OneofWrappers = []interface{}{
+ file_flow_flow_proto_msgTypes[25].OneofWrappers = []interface{}{
(*AgentEvent_Unknown)(nil),
(*AgentEvent_AgentStart)(nil),
(*AgentEvent_PolicyUpdate)(nil),
@@ -5906,7 +6064,7 @@ func file_flow_flow_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_flow_flow_proto_rawDesc,
NumEnums: 14,
- NumMessages: 36,
+ NumMessages: 37,
NumExtensions: 0,
NumServices: 0,
},
diff --git a/vendor/github.com/cilium/cilium/api/v1/flow/flow.pb.json.go b/vendor/github.com/cilium/cilium/api/v1/flow/flow.pb.json.go
index c8588bb3c..09a8e88ac 100644
--- a/vendor/github.com/cilium/cilium/api/v1/flow/flow.pb.json.go
+++ b/vendor/github.com/cilium/cilium/api/v1/flow/flow.pb.json.go
@@ -247,6 +247,22 @@ func (msg *ICMPv6) UnmarshalJSON(b []byte) error {
}.Unmarshal(b, msg)
}
+// MarshalJSON implements json.Marshaler
+func (msg *Policy) MarshalJSON() ([]byte, error) {
+ return protojson.MarshalOptions{
+ UseEnumNumbers: false,
+ EmitUnpopulated: false,
+ UseProtoNames: true,
+ }.Marshal(msg)
+}
+
+// UnmarshalJSON implements json.Unmarshaler
+func (msg *Policy) UnmarshalJSON(b []byte) error {
+ return protojson.UnmarshalOptions{
+ DiscardUnknown: false,
+ }.Unmarshal(b, msg)
+}
+
// MarshalJSON implements json.Marshaler
func (msg *EventTypeFilter) MarshalJSON() ([]byte, error) {
return protojson.MarshalOptions{
diff --git a/vendor/github.com/cilium/cilium/api/v1/flow/flow.proto b/vendor/github.com/cilium/cilium/api/v1/flow/flow.proto
index 11143ea77..e2b1b2131 100644
--- a/vendor/github.com/cilium/cilium/api/v1/flow/flow.proto
+++ b/vendor/github.com/cilium/cilium/api/v1/flow/flow.proto
@@ -3,6 +3,7 @@
syntax = "proto3";
+import "google/protobuf/any.proto";
import "google/protobuf/wrappers.proto";
import "google/protobuf/timestamp.proto";
@@ -114,6 +115,16 @@ message Flow {
// duplicating logic from the old parser. This field will be removed once we
// fully migrate to the new parser.
string Summary = 100000 [deprecated=true];
+
+ // extensions can be used to add arbitrary additional metadata to flows.
+ // This can be used to extend functionality for other Hubble compatible
+ // APIs, or experiment with new functionality without needing to change the public API.
+ google.protobuf.Any extensions = 150000;
+
+ // The CiliumNetworkPolicies allowing the egress of the flow.
+ repeated Policy egress_allowed_by = 21001;
+ // The CiliumNetworkPolicies allowing the ingress of the flow.
+ repeated Policy ingress_allowed_by = 21002;
}
enum FlowType {
@@ -123,7 +134,7 @@ enum FlowType {
SOCK = 3;
}
-// These types correspond to definitions in pkg/policy/l4.go
+// These types correspond to definitions in pkg/policy/l4.go.
enum AuthType {
DISABLED = 0;
SPIRE = 1;
@@ -183,8 +194,7 @@ message Layer4 {
}
}
-// This enum corresponds to Cilium's L7 accesslog FlowType:
-// https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L26
+// This enum corresponds to Cilium's L7 accesslog [FlowType](https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L26):
enum L7FlowType {
UNKNOWN_L7_TYPE = 0;
REQUEST = 1;
@@ -192,8 +202,7 @@ enum L7FlowType {
SAMPLE = 3;
}
-// Message for L7 flow, which roughly corresponds to Cilium's accesslog LogRecord:
-// https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L141
+// Message for L7 flow, which roughly corresponds to Cilium's accesslog [LogRecord](https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L141):
message Layer7 {
L7FlowType type = 1;
// Latency of the response
@@ -206,10 +215,9 @@ message Layer7 {
}
}
-// TraceContext contains trace context propagation data, ie information about a
+// TraceContext contains trace context propagation data, i.e. information about a
// distributed trace.
-// For more information about trace context, check the W3C Trace Context
-// specification: https://www.w3.org/TR/trace-context/
+// For more information about trace context, check the [W3C Trace Context specification](https://www.w3.org/TR/trace-context/).
message TraceContext {
// parent identifies the incoming request in a tracing system.
TraceParent parent = 1;
@@ -391,6 +399,9 @@ enum DropReason {
INVALID_CLUSTER_ID = 192;
UNSUPPORTED_PROTOCOL_FOR_DSR_ENCAP = 193;
NO_EGRESS_GATEWAY = 194;
+ UNENCRYPTED_TRAFFIC = 195;
+ TTL_EXCEEDED = 196;
+ NO_NODE_ID = 197;
}
enum TrafficDirection {
@@ -413,7 +424,14 @@ enum DebugCapturePoint {
DBG_CAPTURE_SNAT_POST = 11;
}
-// EventTypeFilter is a filter describing a particular event type
+message Policy {
+ string name = 1;
+ string namespace = 2;
+ repeated string labels = 3;
+ uint64 revision = 4;
+}
+
+// EventTypeFilter is a filter describing a particular event type.
message EventTypeFilter {
// type is the primary flow type as defined by:
// github.com/cilium/cilium/pkg/monitor/api.MessageType*
@@ -428,7 +446,7 @@ message EventTypeFilter {
int32 sub_type = 3;
}
-// CiliumEventType from which the flow originated
+// CiliumEventType from which the flow originated.
message CiliumEventType {
// type of event the flow originated from, i.e.
// github.com/cilium/cilium/pkg/monitor/api.MessageType*
@@ -513,6 +531,8 @@ message FlowFilter {
repeated string http_method = 21;
// http_path is a list of regular expressions to filter on the HTTP path.
repeated string http_path = 22;
+ // http_url is a list of regular expressions to filter on the HTTP URL.
+ repeated string http_url = 31;
// tcp_flags filters flows based on TCP header flags
repeated TCPFlags tcp_flags = 23;
@@ -537,8 +557,7 @@ enum EventType {
RecordLost = 2;
}
-// DNS flow. This is basically directly mapped from Cilium's LogRecordDNS:
-// https://github.com/cilium/cilium/blob/04f3889d627774f79e56d14ddbc165b3169e2d01/pkg/proxy/accesslog/record.go#L264
+// DNS flow. This is basically directly mapped from Cilium's [LogRecordDNS](https://github.com/cilium/cilium/blob/04f3889d627774f79e56d14ddbc165b3169e2d01/pkg/proxy/accesslog/record.go#L264):
message DNS {
// DNS name that's being looked up: e.g. "isovalent.com."
string query = 1;
@@ -567,8 +586,7 @@ message HTTPHeader {
string value = 2;
}
-// L7 information for HTTP flows. It corresponds to Cilium's accesslog.LogRecordHTTP type.
-// https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L206
+// L7 information for HTTP flows. It corresponds to Cilium's [accesslog.LogRecordHTTP](https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L206) type.
message HTTP {
uint32 code = 1;
string method = 2;
@@ -577,8 +595,7 @@ message HTTP {
repeated HTTPHeader headers = 5;
}
-// L7 information for Kafka flows. It corresponds to Cilium's accesslog.LogRecordKafka type.
-// https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L229
+// L7 information for Kafka flows. It corresponds to Cilium's [accesslog.LogRecordKafka](https://github.com/cilium/cilium/blob/728c79e427438ab6f8d9375b62fccd6fed4ace3a/pkg/proxy/accesslog/record.go#L229) type.
message Kafka {
int32 error_code = 1;
int32 api_version = 2;
@@ -621,7 +638,7 @@ message LostEvent {
}
// AgentEventType is the type of agent event. These values are shared with type
-// AgentNotification in pkg/monitor/api/types.go
+// AgentNotification in pkg/monitor/api/types.go.
enum AgentEventType {
AGENT_EVENT_UNKNOWN = 0;
// used for AGENT_EVENT_GENERIC in monitor API, but there are currently no
@@ -811,4 +828,6 @@ enum DebugEventType {
DBG_SK_LOOKUP4 = 62;
DBG_SK_LOOKUP6 = 63;
DBG_SK_ASSIGN = 64;
+ DBG_L7_LB = 65;
+ DBG_SKIP_POLICY = 66;
}
diff --git a/vendor/github.com/cilium/cilium/api/v1/flow/flow_deepcopy.pb.go b/vendor/github.com/cilium/cilium/api/v1/flow/flow_deepcopy.pb.go
new file mode 100644
index 000000000..94dc44316
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/flow/flow_deepcopy.pb.go
@@ -0,0 +1,784 @@
+// Code generated by protoc-gen-deepcopy. DO NOT EDIT.
+
+package flow
+
+import (
+ proto "google.golang.org/protobuf/proto"
+)
+
+// DeepCopyInto supports using Flow within kubernetes types, where deepcopy-gen is used.
+func (in *Flow) DeepCopyInto(out *Flow) {
+ p := proto.Clone(in).(*Flow)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Flow. Required by controller-gen.
+func (in *Flow) DeepCopy() *Flow {
+ if in == nil {
+ return nil
+ }
+ out := new(Flow)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new Flow. Required by controller-gen.
+func (in *Flow) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
+
+// DeepCopyInto supports using Layer4 within kubernetes types, where deepcopy-gen is used.
+func (in *Layer4) DeepCopyInto(out *Layer4) {
+ p := proto.Clone(in).(*Layer4)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Layer4. Required by controller-gen.
+func (in *Layer4) DeepCopy() *Layer4 {
+ if in == nil {
+ return nil
+ }
+ out := new(Layer4)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new Layer4. Required by controller-gen.
+func (in *Layer4) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
+
+// DeepCopyInto supports using Layer7 within kubernetes types, where deepcopy-gen is used.
+func (in *Layer7) DeepCopyInto(out *Layer7) {
+ p := proto.Clone(in).(*Layer7)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Layer7. Required by controller-gen.
+func (in *Layer7) DeepCopy() *Layer7 {
+ if in == nil {
+ return nil
+ }
+ out := new(Layer7)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new Layer7. Required by controller-gen.
+func (in *Layer7) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
+
+// DeepCopyInto supports using TraceContext within kubernetes types, where deepcopy-gen is used.
+func (in *TraceContext) DeepCopyInto(out *TraceContext) {
+ p := proto.Clone(in).(*TraceContext)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TraceContext. Required by controller-gen.
+func (in *TraceContext) DeepCopy() *TraceContext {
+ if in == nil {
+ return nil
+ }
+ out := new(TraceContext)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new TraceContext. Required by controller-gen.
+func (in *TraceContext) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
+
+// DeepCopyInto supports using TraceParent within kubernetes types, where deepcopy-gen is used.
+func (in *TraceParent) DeepCopyInto(out *TraceParent) {
+ p := proto.Clone(in).(*TraceParent)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TraceParent. Required by controller-gen.
+func (in *TraceParent) DeepCopy() *TraceParent {
+ if in == nil {
+ return nil
+ }
+ out := new(TraceParent)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new TraceParent. Required by controller-gen.
+func (in *TraceParent) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
+
+// DeepCopyInto supports using Endpoint within kubernetes types, where deepcopy-gen is used.
+func (in *Endpoint) DeepCopyInto(out *Endpoint) {
+ p := proto.Clone(in).(*Endpoint)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint. Required by controller-gen.
+func (in *Endpoint) DeepCopy() *Endpoint {
+ if in == nil {
+ return nil
+ }
+ out := new(Endpoint)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint. Required by controller-gen.
+func (in *Endpoint) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
+
+// DeepCopyInto supports using Workload within kubernetes types, where deepcopy-gen is used.
+func (in *Workload) DeepCopyInto(out *Workload) {
+ p := proto.Clone(in).(*Workload)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workload. Required by controller-gen.
+func (in *Workload) DeepCopy() *Workload {
+ if in == nil {
+ return nil
+ }
+ out := new(Workload)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new Workload. Required by controller-gen.
+func (in *Workload) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
+
+// DeepCopyInto supports using TCP within kubernetes types, where deepcopy-gen is used.
+func (in *TCP) DeepCopyInto(out *TCP) {
+ p := proto.Clone(in).(*TCP)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCP. Required by controller-gen.
+func (in *TCP) DeepCopy() *TCP {
+ if in == nil {
+ return nil
+ }
+ out := new(TCP)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new TCP. Required by controller-gen.
+func (in *TCP) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
+
+// DeepCopyInto supports using IP within kubernetes types, where deepcopy-gen is used.
+func (in *IP) DeepCopyInto(out *IP) {
+ p := proto.Clone(in).(*IP)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IP. Required by controller-gen.
+func (in *IP) DeepCopy() *IP {
+ if in == nil {
+ return nil
+ }
+ out := new(IP)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new IP. Required by controller-gen.
+func (in *IP) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
+
+// DeepCopyInto supports using Ethernet within kubernetes types, where deepcopy-gen is used.
+func (in *Ethernet) DeepCopyInto(out *Ethernet) {
+ p := proto.Clone(in).(*Ethernet)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ethernet. Required by controller-gen.
+func (in *Ethernet) DeepCopy() *Ethernet {
+ if in == nil {
+ return nil
+ }
+ out := new(Ethernet)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new Ethernet. Required by controller-gen.
+func (in *Ethernet) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
+
+// DeepCopyInto supports using TCPFlags within kubernetes types, where deepcopy-gen is used.
+func (in *TCPFlags) DeepCopyInto(out *TCPFlags) {
+ p := proto.Clone(in).(*TCPFlags)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPFlags. Required by controller-gen.
+func (in *TCPFlags) DeepCopy() *TCPFlags {
+ if in == nil {
+ return nil
+ }
+ out := new(TCPFlags)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new TCPFlags. Required by controller-gen.
+func (in *TCPFlags) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
+
+// DeepCopyInto supports using UDP within kubernetes types, where deepcopy-gen is used.
+func (in *UDP) DeepCopyInto(out *UDP) {
+ p := proto.Clone(in).(*UDP)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UDP. Required by controller-gen.
+func (in *UDP) DeepCopy() *UDP {
+ if in == nil {
+ return nil
+ }
+ out := new(UDP)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new UDP. Required by controller-gen.
+func (in *UDP) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
+
+// DeepCopyInto supports using SCTP within kubernetes types, where deepcopy-gen is used.
+func (in *SCTP) DeepCopyInto(out *SCTP) {
+ p := proto.Clone(in).(*SCTP)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SCTP. Required by controller-gen.
+func (in *SCTP) DeepCopy() *SCTP {
+ if in == nil {
+ return nil
+ }
+ out := new(SCTP)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new SCTP. Required by controller-gen.
+func (in *SCTP) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
+
+// DeepCopyInto supports using ICMPv4 within kubernetes types, where deepcopy-gen is used.
+func (in *ICMPv4) DeepCopyInto(out *ICMPv4) {
+ p := proto.Clone(in).(*ICMPv4)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ICMPv4. Required by controller-gen.
+func (in *ICMPv4) DeepCopy() *ICMPv4 {
+ if in == nil {
+ return nil
+ }
+ out := new(ICMPv4)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new ICMPv4. Required by controller-gen.
+func (in *ICMPv4) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
+
+// DeepCopyInto supports using ICMPv6 within kubernetes types, where deepcopy-gen is used.
+func (in *ICMPv6) DeepCopyInto(out *ICMPv6) {
+ p := proto.Clone(in).(*ICMPv6)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ICMPv6. Required by controller-gen.
+func (in *ICMPv6) DeepCopy() *ICMPv6 {
+ if in == nil {
+ return nil
+ }
+ out := new(ICMPv6)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new ICMPv6. Required by controller-gen.
+func (in *ICMPv6) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
+
+// DeepCopyInto supports using Policy within kubernetes types, where deepcopy-gen is used.
+func (in *Policy) DeepCopyInto(out *Policy) {
+ p := proto.Clone(in).(*Policy)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy. Required by controller-gen.
+func (in *Policy) DeepCopy() *Policy {
+ if in == nil {
+ return nil
+ }
+ out := new(Policy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new Policy. Required by controller-gen.
+func (in *Policy) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
+
+// DeepCopyInto supports using EventTypeFilter within kubernetes types, where deepcopy-gen is used.
+func (in *EventTypeFilter) DeepCopyInto(out *EventTypeFilter) {
+ p := proto.Clone(in).(*EventTypeFilter)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventTypeFilter. Required by controller-gen.
+func (in *EventTypeFilter) DeepCopy() *EventTypeFilter {
+ if in == nil {
+ return nil
+ }
+ out := new(EventTypeFilter)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new EventTypeFilter. Required by controller-gen.
+func (in *EventTypeFilter) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
+
+// DeepCopyInto supports using CiliumEventType within kubernetes types, where deepcopy-gen is used.
+func (in *CiliumEventType) DeepCopyInto(out *CiliumEventType) {
+ p := proto.Clone(in).(*CiliumEventType)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEventType. Required by controller-gen.
+func (in *CiliumEventType) DeepCopy() *CiliumEventType {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumEventType)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEventType. Required by controller-gen.
+func (in *CiliumEventType) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
+
+// DeepCopyInto supports using FlowFilter within kubernetes types, where deepcopy-gen is used.
+func (in *FlowFilter) DeepCopyInto(out *FlowFilter) {
+ p := proto.Clone(in).(*FlowFilter)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlowFilter. Required by controller-gen.
+func (in *FlowFilter) DeepCopy() *FlowFilter {
+ if in == nil {
+ return nil
+ }
+ out := new(FlowFilter)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new FlowFilter. Required by controller-gen.
+func (in *FlowFilter) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
+
+// DeepCopyInto supports using DNS within kubernetes types, where deepcopy-gen is used.
+func (in *DNS) DeepCopyInto(out *DNS) {
+ p := proto.Clone(in).(*DNS)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNS. Required by controller-gen.
+func (in *DNS) DeepCopy() *DNS {
+ if in == nil {
+ return nil
+ }
+ out := new(DNS)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new DNS. Required by controller-gen.
+func (in *DNS) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
+
+// DeepCopyInto supports using HTTPHeader within kubernetes types, where deepcopy-gen is used.
+func (in *HTTPHeader) DeepCopyInto(out *HTTPHeader) {
+ p := proto.Clone(in).(*HTTPHeader)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeader. Required by controller-gen.
+func (in *HTTPHeader) DeepCopy() *HTTPHeader {
+ if in == nil {
+ return nil
+ }
+ out := new(HTTPHeader)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeader. Required by controller-gen.
+func (in *HTTPHeader) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
+
+// DeepCopyInto supports using HTTP within kubernetes types, where deepcopy-gen is used.
+func (in *HTTP) DeepCopyInto(out *HTTP) {
+ p := proto.Clone(in).(*HTTP)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTP. Required by controller-gen.
+func (in *HTTP) DeepCopy() *HTTP {
+ if in == nil {
+ return nil
+ }
+ out := new(HTTP)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new HTTP. Required by controller-gen.
+func (in *HTTP) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
+
+// DeepCopyInto supports using Kafka within kubernetes types, where deepcopy-gen is used.
+func (in *Kafka) DeepCopyInto(out *Kafka) {
+ p := proto.Clone(in).(*Kafka)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Kafka. Required by controller-gen.
+func (in *Kafka) DeepCopy() *Kafka {
+ if in == nil {
+ return nil
+ }
+ out := new(Kafka)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new Kafka. Required by controller-gen.
+func (in *Kafka) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
+
+// DeepCopyInto supports using Service within kubernetes types, where deepcopy-gen is used.
+func (in *Service) DeepCopyInto(out *Service) {
+ p := proto.Clone(in).(*Service)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service. Required by controller-gen.
+func (in *Service) DeepCopy() *Service {
+ if in == nil {
+ return nil
+ }
+ out := new(Service)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new Service. Required by controller-gen.
+func (in *Service) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
+
+// DeepCopyInto supports using LostEvent within kubernetes types, where deepcopy-gen is used.
+func (in *LostEvent) DeepCopyInto(out *LostEvent) {
+ p := proto.Clone(in).(*LostEvent)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LostEvent. Required by controller-gen.
+func (in *LostEvent) DeepCopy() *LostEvent {
+ if in == nil {
+ return nil
+ }
+ out := new(LostEvent)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new LostEvent. Required by controller-gen.
+func (in *LostEvent) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
+
+// DeepCopyInto supports using AgentEvent within kubernetes types, where deepcopy-gen is used.
+func (in *AgentEvent) DeepCopyInto(out *AgentEvent) {
+ p := proto.Clone(in).(*AgentEvent)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentEvent. Required by controller-gen.
+func (in *AgentEvent) DeepCopy() *AgentEvent {
+ if in == nil {
+ return nil
+ }
+ out := new(AgentEvent)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new AgentEvent. Required by controller-gen.
+func (in *AgentEvent) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
+
+// DeepCopyInto supports using AgentEventUnknown within kubernetes types, where deepcopy-gen is used.
+func (in *AgentEventUnknown) DeepCopyInto(out *AgentEventUnknown) {
+ p := proto.Clone(in).(*AgentEventUnknown)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentEventUnknown. Required by controller-gen.
+func (in *AgentEventUnknown) DeepCopy() *AgentEventUnknown {
+ if in == nil {
+ return nil
+ }
+ out := new(AgentEventUnknown)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new AgentEventUnknown. Required by controller-gen.
+func (in *AgentEventUnknown) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
+
+// DeepCopyInto supports using TimeNotification within kubernetes types, where deepcopy-gen is used.
+func (in *TimeNotification) DeepCopyInto(out *TimeNotification) {
+ p := proto.Clone(in).(*TimeNotification)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeNotification. Required by controller-gen.
+func (in *TimeNotification) DeepCopy() *TimeNotification {
+ if in == nil {
+ return nil
+ }
+ out := new(TimeNotification)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new TimeNotification. Required by controller-gen.
+func (in *TimeNotification) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
+
+// DeepCopyInto supports using PolicyUpdateNotification within kubernetes types, where deepcopy-gen is used.
+func (in *PolicyUpdateNotification) DeepCopyInto(out *PolicyUpdateNotification) {
+ p := proto.Clone(in).(*PolicyUpdateNotification)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyUpdateNotification. Required by controller-gen.
+func (in *PolicyUpdateNotification) DeepCopy() *PolicyUpdateNotification {
+ if in == nil {
+ return nil
+ }
+ out := new(PolicyUpdateNotification)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new PolicyUpdateNotification. Required by controller-gen.
+func (in *PolicyUpdateNotification) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
+
+// DeepCopyInto supports using EndpointRegenNotification within kubernetes types, where deepcopy-gen is used.
+func (in *EndpointRegenNotification) DeepCopyInto(out *EndpointRegenNotification) {
+ p := proto.Clone(in).(*EndpointRegenNotification)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointRegenNotification. Required by controller-gen.
+func (in *EndpointRegenNotification) DeepCopy() *EndpointRegenNotification {
+ if in == nil {
+ return nil
+ }
+ out := new(EndpointRegenNotification)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new EndpointRegenNotification. Required by controller-gen.
+func (in *EndpointRegenNotification) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
+
+// DeepCopyInto supports using EndpointUpdateNotification within kubernetes types, where deepcopy-gen is used.
+func (in *EndpointUpdateNotification) DeepCopyInto(out *EndpointUpdateNotification) {
+ p := proto.Clone(in).(*EndpointUpdateNotification)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointUpdateNotification. Required by controller-gen.
+func (in *EndpointUpdateNotification) DeepCopy() *EndpointUpdateNotification {
+ if in == nil {
+ return nil
+ }
+ out := new(EndpointUpdateNotification)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new EndpointUpdateNotification. Required by controller-gen.
+func (in *EndpointUpdateNotification) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
+
+// DeepCopyInto supports using IPCacheNotification within kubernetes types, where deepcopy-gen is used.
+func (in *IPCacheNotification) DeepCopyInto(out *IPCacheNotification) {
+ p := proto.Clone(in).(*IPCacheNotification)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPCacheNotification. Required by controller-gen.
+func (in *IPCacheNotification) DeepCopy() *IPCacheNotification {
+ if in == nil {
+ return nil
+ }
+ out := new(IPCacheNotification)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new IPCacheNotification. Required by controller-gen.
+func (in *IPCacheNotification) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
+
+// DeepCopyInto supports using ServiceUpsertNotificationAddr within kubernetes types, where deepcopy-gen is used.
+func (in *ServiceUpsertNotificationAddr) DeepCopyInto(out *ServiceUpsertNotificationAddr) {
+ p := proto.Clone(in).(*ServiceUpsertNotificationAddr)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceUpsertNotificationAddr. Required by controller-gen.
+func (in *ServiceUpsertNotificationAddr) DeepCopy() *ServiceUpsertNotificationAddr {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceUpsertNotificationAddr)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new ServiceUpsertNotificationAddr. Required by controller-gen.
+func (in *ServiceUpsertNotificationAddr) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
+
+// DeepCopyInto supports using ServiceUpsertNotification within kubernetes types, where deepcopy-gen is used.
+func (in *ServiceUpsertNotification) DeepCopyInto(out *ServiceUpsertNotification) {
+ p := proto.Clone(in).(*ServiceUpsertNotification)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceUpsertNotification. Required by controller-gen.
+func (in *ServiceUpsertNotification) DeepCopy() *ServiceUpsertNotification {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceUpsertNotification)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new ServiceUpsertNotification. Required by controller-gen.
+func (in *ServiceUpsertNotification) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
+
+// DeepCopyInto supports using ServiceDeleteNotification within kubernetes types, where deepcopy-gen is used.
+func (in *ServiceDeleteNotification) DeepCopyInto(out *ServiceDeleteNotification) {
+ p := proto.Clone(in).(*ServiceDeleteNotification)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceDeleteNotification. Required by controller-gen.
+func (in *ServiceDeleteNotification) DeepCopy() *ServiceDeleteNotification {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceDeleteNotification)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new ServiceDeleteNotification. Required by controller-gen.
+func (in *ServiceDeleteNotification) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
+
+// DeepCopyInto supports using NetworkInterface within kubernetes types, where deepcopy-gen is used.
+func (in *NetworkInterface) DeepCopyInto(out *NetworkInterface) {
+ p := proto.Clone(in).(*NetworkInterface)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterface. Required by controller-gen.
+func (in *NetworkInterface) DeepCopy() *NetworkInterface {
+ if in == nil {
+ return nil
+ }
+ out := new(NetworkInterface)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new NetworkInterface. Required by controller-gen.
+func (in *NetworkInterface) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
+
+// DeepCopyInto supports using DebugEvent within kubernetes types, where deepcopy-gen is used.
+func (in *DebugEvent) DeepCopyInto(out *DebugEvent) {
+ p := proto.Clone(in).(*DebugEvent)
+ *out = *p
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DebugEvent. Required by controller-gen.
+func (in *DebugEvent) DeepCopy() *DebugEvent {
+ if in == nil {
+ return nil
+ }
+ out := new(DebugEvent)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInterface is an autogenerated deepcopy function, copying the receiver, creating a new DebugEvent. Required by controller-gen.
+func (in *DebugEvent) DeepCopyInterface() interface{} {
+ return in.DeepCopy()
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/health/client/cilium_health_api_client.go b/vendor/github.com/cilium/cilium/api/v1/health/client/cilium_health_api_client.go
new file mode 100644
index 000000000..b6887e63d
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/health/client/cilium_health_api_client.go
@@ -0,0 +1,120 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package client
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "github.com/go-openapi/runtime"
+ httptransport "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/health/client/connectivity"
+ "github.com/cilium/cilium/api/v1/health/client/restapi"
+)
+
+// Default cilium health API HTTP client.
+var Default = NewHTTPClient(nil)
+
+const (
+ // DefaultHost is the default Host
+ // found in Meta (info) section of spec file
+ DefaultHost string = "localhost"
+ // DefaultBasePath is the default BasePath
+ // found in Meta (info) section of spec file
+ DefaultBasePath string = "/v1beta"
+)
+
+// DefaultSchemes are the default schemes found in Meta (info) section of spec file
+var DefaultSchemes = []string{"http"}
+
+// NewHTTPClient creates a new cilium health API HTTP client.
+func NewHTTPClient(formats strfmt.Registry) *CiliumHealthAPI {
+ return NewHTTPClientWithConfig(formats, nil)
+}
+
+// NewHTTPClientWithConfig creates a new cilium health API HTTP client,
+// using a customizable transport config.
+func NewHTTPClientWithConfig(formats strfmt.Registry, cfg *TransportConfig) *CiliumHealthAPI {
+ // ensure nullable parameters have default
+ if cfg == nil {
+ cfg = DefaultTransportConfig()
+ }
+
+ // create transport and client
+ transport := httptransport.New(cfg.Host, cfg.BasePath, cfg.Schemes)
+ return New(transport, formats)
+}
+
+// New creates a new cilium health API client
+func New(transport runtime.ClientTransport, formats strfmt.Registry) *CiliumHealthAPI {
+ // ensure nullable parameters have default
+ if formats == nil {
+ formats = strfmt.Default
+ }
+
+ cli := new(CiliumHealthAPI)
+ cli.Transport = transport
+ cli.Connectivity = connectivity.New(transport, formats)
+ cli.Restapi = restapi.New(transport, formats)
+ return cli
+}
+
+// DefaultTransportConfig creates a TransportConfig with the
+// default settings taken from the meta section of the spec file.
+func DefaultTransportConfig() *TransportConfig {
+ return &TransportConfig{
+ Host: DefaultHost,
+ BasePath: DefaultBasePath,
+ Schemes: DefaultSchemes,
+ }
+}
+
+// TransportConfig contains the transport related info,
+// found in the meta section of the spec file.
+type TransportConfig struct {
+ Host string
+ BasePath string
+ Schemes []string
+}
+
+// WithHost overrides the default host,
+// provided by the meta section of the spec file.
+func (cfg *TransportConfig) WithHost(host string) *TransportConfig {
+ cfg.Host = host
+ return cfg
+}
+
+// WithBasePath overrides the default basePath,
+// provided by the meta section of the spec file.
+func (cfg *TransportConfig) WithBasePath(basePath string) *TransportConfig {
+ cfg.BasePath = basePath
+ return cfg
+}
+
+// WithSchemes overrides the default schemes,
+// provided by the meta section of the spec file.
+func (cfg *TransportConfig) WithSchemes(schemes []string) *TransportConfig {
+ cfg.Schemes = schemes
+ return cfg
+}
+
+// CiliumHealthAPI is a client for cilium health API
+type CiliumHealthAPI struct {
+ Connectivity connectivity.ClientService
+
+ Restapi restapi.ClientService
+
+ Transport runtime.ClientTransport
+}
+
+// SetTransport changes the transport on the client and all its subresources
+func (c *CiliumHealthAPI) SetTransport(transport runtime.ClientTransport) {
+ c.Transport = transport
+ c.Connectivity.SetTransport(transport)
+ c.Restapi.SetTransport(transport)
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/connectivity_client.go b/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/connectivity_client.go
new file mode 100644
index 000000000..b8918f4b2
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/connectivity_client.go
@@ -0,0 +1,130 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package connectivity
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+)
+
+// New creates a new connectivity API client.
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
+ return &Client{transport: transport, formats: formats}
+}
+
+/*
+Client for connectivity API
+*/
+type Client struct {
+ transport runtime.ClientTransport
+ formats strfmt.Registry
+}
+
+// ClientOption is the option for Client methods
+type ClientOption func(*runtime.ClientOperation)
+
+// ClientService is the interface for Client methods
+type ClientService interface {
+ GetStatus(params *GetStatusParams, opts ...ClientOption) (*GetStatusOK, error)
+
+ PutStatusProbe(params *PutStatusProbeParams, opts ...ClientOption) (*PutStatusProbeOK, error)
+
+ SetTransport(transport runtime.ClientTransport)
+}
+
+/*
+ GetStatus gets connectivity status of the cilium cluster
+
+ Returns the connectivity status to all other cilium-health instances
+
+using interval-based probing.
+*/
+func (a *Client) GetStatus(params *GetStatusParams, opts ...ClientOption) (*GetStatusOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetStatusParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetStatus",
+ Method: "GET",
+ PathPattern: "/status",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetStatusReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetStatusOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetStatus: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+/*
+ PutStatusProbe runs synchronous connectivity probe to determine status of the cilium cluster
+
+ Runs a synchronous probe to all other cilium-health instances and
+
+returns the connectivity status.
+*/
+func (a *Client) PutStatusProbe(params *PutStatusProbeParams, opts ...ClientOption) (*PutStatusProbeOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewPutStatusProbeParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "PutStatusProbe",
+ Method: "PUT",
+ PathPattern: "/status/probe",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &PutStatusProbeReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*PutStatusProbeOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for PutStatusProbe: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+ a.transport = transport
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/get_status_parameters.go b/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/get_status_parameters.go
new file mode 100644
index 000000000..bf0cf9138
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/get_status_parameters.go
@@ -0,0 +1,131 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package connectivity
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetStatusParams creates a new GetStatusParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetStatusParams() *GetStatusParams {
+ return &GetStatusParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetStatusParamsWithTimeout creates a new GetStatusParams object
+// with the ability to set a timeout on a request.
+func NewGetStatusParamsWithTimeout(timeout time.Duration) *GetStatusParams {
+ return &GetStatusParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetStatusParamsWithContext creates a new GetStatusParams object
+// with the ability to set a context for a request.
+func NewGetStatusParamsWithContext(ctx context.Context) *GetStatusParams {
+ return &GetStatusParams{
+ Context: ctx,
+ }
+}
+
+// NewGetStatusParamsWithHTTPClient creates a new GetStatusParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetStatusParamsWithHTTPClient(client *http.Client) *GetStatusParams {
+ return &GetStatusParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetStatusParams contains all the parameters to send to the API endpoint
+
+ for the get status operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetStatusParams struct {
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get status params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetStatusParams) WithDefaults() *GetStatusParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get status params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetStatusParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get status params
+func (o *GetStatusParams) WithTimeout(timeout time.Duration) *GetStatusParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get status params
+func (o *GetStatusParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get status params
+func (o *GetStatusParams) WithContext(ctx context.Context) *GetStatusParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get status params
+func (o *GetStatusParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get status params
+func (o *GetStatusParams) WithHTTPClient(client *http.Client) *GetStatusParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get status params
+func (o *GetStatusParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetStatusParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/get_status_responses.go b/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/get_status_responses.go
new file mode 100644
index 000000000..cf977a0df
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/get_status_responses.go
@@ -0,0 +1,101 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package connectivity
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/health/models"
+)
+
+// GetStatusReader is a Reader for the GetStatus structure.
+type GetStatusReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetStatusReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetStatusOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetStatusOK creates a GetStatusOK with default headers values
+func NewGetStatusOK() *GetStatusOK {
+ return &GetStatusOK{}
+}
+
+/*
+GetStatusOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetStatusOK struct {
+ Payload *models.HealthStatusResponse
+}
+
+// IsSuccess returns true when this get status o k response has a 2xx status code
+func (o *GetStatusOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get status o k response has a 3xx status code
+func (o *GetStatusOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get status o k response has a 4xx status code
+func (o *GetStatusOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get status o k response has a 5xx status code
+func (o *GetStatusOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get status o k response a status code equal to that given
+func (o *GetStatusOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetStatusOK) Error() string {
+ return fmt.Sprintf("[GET /status][%d] getStatusOK %+v", 200, o.Payload)
+}
+
+func (o *GetStatusOK) String() string {
+ return fmt.Sprintf("[GET /status][%d] getStatusOK %+v", 200, o.Payload)
+}
+
+func (o *GetStatusOK) GetPayload() *models.HealthStatusResponse {
+ return o.Payload
+}
+
+func (o *GetStatusOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.HealthStatusResponse)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/put_status_probe_parameters.go b/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/put_status_probe_parameters.go
new file mode 100644
index 000000000..8708a9a2a
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/put_status_probe_parameters.go
@@ -0,0 +1,131 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package connectivity
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewPutStatusProbeParams creates a new PutStatusProbeParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewPutStatusProbeParams() *PutStatusProbeParams {
+ return &PutStatusProbeParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewPutStatusProbeParamsWithTimeout creates a new PutStatusProbeParams object
+// with the ability to set a timeout on a request.
+func NewPutStatusProbeParamsWithTimeout(timeout time.Duration) *PutStatusProbeParams {
+ return &PutStatusProbeParams{
+ timeout: timeout,
+ }
+}
+
+// NewPutStatusProbeParamsWithContext creates a new PutStatusProbeParams object
+// with the ability to set a context for a request.
+func NewPutStatusProbeParamsWithContext(ctx context.Context) *PutStatusProbeParams {
+ return &PutStatusProbeParams{
+ Context: ctx,
+ }
+}
+
+// NewPutStatusProbeParamsWithHTTPClient creates a new PutStatusProbeParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewPutStatusProbeParamsWithHTTPClient(client *http.Client) *PutStatusProbeParams {
+ return &PutStatusProbeParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+PutStatusProbeParams contains all the parameters to send to the API endpoint
+
+ for the put status probe operation.
+
+ Typically these are written to a http.Request.
+*/
+type PutStatusProbeParams struct {
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the put status probe params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *PutStatusProbeParams) WithDefaults() *PutStatusProbeParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the put status probe params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *PutStatusProbeParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the put status probe params
+func (o *PutStatusProbeParams) WithTimeout(timeout time.Duration) *PutStatusProbeParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the put status probe params
+func (o *PutStatusProbeParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the put status probe params
+func (o *PutStatusProbeParams) WithContext(ctx context.Context) *PutStatusProbeParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the put status probe params
+func (o *PutStatusProbeParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the put status probe params
+func (o *PutStatusProbeParams) WithHTTPClient(client *http.Client) *PutStatusProbeParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the put status probe params
+func (o *PutStatusProbeParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *PutStatusProbeParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/put_status_probe_responses.go b/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/put_status_probe_responses.go
new file mode 100644
index 000000000..68d6dadbd
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/put_status_probe_responses.go
@@ -0,0 +1,225 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package connectivity
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/health/models"
+)
+
+// PutStatusProbeReader is a Reader for the PutStatusProbe structure.
+type PutStatusProbeReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *PutStatusProbeReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewPutStatusProbeOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 403:
+ result := NewPutStatusProbeForbidden()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ case 500:
+ result := NewPutStatusProbeFailed()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewPutStatusProbeOK creates a PutStatusProbeOK with default headers values
+func NewPutStatusProbeOK() *PutStatusProbeOK {
+ return &PutStatusProbeOK{}
+}
+
+/*
+PutStatusProbeOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type PutStatusProbeOK struct {
+ Payload *models.HealthStatusResponse
+}
+
+// IsSuccess returns true when this put status probe o k response has a 2xx status code
+func (o *PutStatusProbeOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this put status probe o k response has a 3xx status code
+func (o *PutStatusProbeOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this put status probe o k response has a 4xx status code
+func (o *PutStatusProbeOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this put status probe o k response has a 5xx status code
+func (o *PutStatusProbeOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this put status probe o k response a status code equal to that given
+func (o *PutStatusProbeOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *PutStatusProbeOK) Error() string {
+ return fmt.Sprintf("[PUT /status/probe][%d] putStatusProbeOK %+v", 200, o.Payload)
+}
+
+func (o *PutStatusProbeOK) String() string {
+ return fmt.Sprintf("[PUT /status/probe][%d] putStatusProbeOK %+v", 200, o.Payload)
+}
+
+func (o *PutStatusProbeOK) GetPayload() *models.HealthStatusResponse {
+ return o.Payload
+}
+
+func (o *PutStatusProbeOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.HealthStatusResponse)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewPutStatusProbeForbidden creates a PutStatusProbeForbidden with default headers values
+func NewPutStatusProbeForbidden() *PutStatusProbeForbidden {
+ return &PutStatusProbeForbidden{}
+}
+
+/*
+PutStatusProbeForbidden describes a response with status code 403, with default header values.
+
+Forbidden
+*/
+type PutStatusProbeForbidden struct {
+}
+
+// IsSuccess returns true when this put status probe forbidden response has a 2xx status code
+func (o *PutStatusProbeForbidden) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this put status probe forbidden response has a 3xx status code
+func (o *PutStatusProbeForbidden) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this put status probe forbidden response has a 4xx status code
+func (o *PutStatusProbeForbidden) IsClientError() bool {
+ return true
+}
+
+// IsServerError returns true when this put status probe forbidden response has a 5xx status code
+func (o *PutStatusProbeForbidden) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this put status probe forbidden response a status code equal to that given
+func (o *PutStatusProbeForbidden) IsCode(code int) bool {
+ return code == 403
+}
+
+func (o *PutStatusProbeForbidden) Error() string {
+ return fmt.Sprintf("[PUT /status/probe][%d] putStatusProbeForbidden ", 403)
+}
+
+func (o *PutStatusProbeForbidden) String() string {
+ return fmt.Sprintf("[PUT /status/probe][%d] putStatusProbeForbidden ", 403)
+}
+
+func (o *PutStatusProbeForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ return nil
+}
+
+// NewPutStatusProbeFailed creates a PutStatusProbeFailed with default headers values
+func NewPutStatusProbeFailed() *PutStatusProbeFailed {
+ return &PutStatusProbeFailed{}
+}
+
+/*
+PutStatusProbeFailed describes a response with status code 500, with default header values.
+
+Internal error occurred while conducting connectivity probe
+*/
+type PutStatusProbeFailed struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this put status probe failed response has a 2xx status code
+func (o *PutStatusProbeFailed) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this put status probe failed response has a 3xx status code
+func (o *PutStatusProbeFailed) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this put status probe failed response has a 4xx status code
+func (o *PutStatusProbeFailed) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this put status probe failed response has a 5xx status code
+func (o *PutStatusProbeFailed) IsServerError() bool {
+ return true
+}
+
+// IsCode returns true when this put status probe failed response a status code equal to that given
+func (o *PutStatusProbeFailed) IsCode(code int) bool {
+ return code == 500
+}
+
+func (o *PutStatusProbeFailed) Error() string {
+ return fmt.Sprintf("[PUT /status/probe][%d] putStatusProbeFailed %+v", 500, o.Payload)
+}
+
+func (o *PutStatusProbeFailed) String() string {
+ return fmt.Sprintf("[PUT /status/probe][%d] putStatusProbeFailed %+v", 500, o.Payload)
+}
+
+func (o *PutStatusProbeFailed) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *PutStatusProbeFailed) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/health/client/restapi/get_healthz_parameters.go b/vendor/github.com/cilium/cilium/api/v1/health/client/restapi/get_healthz_parameters.go
new file mode 100644
index 000000000..3eb0ce8c8
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/health/client/restapi/get_healthz_parameters.go
@@ -0,0 +1,131 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package restapi
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "net/http"
+ "time"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/runtime"
+ cr "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+)
+
+// NewGetHealthzParams creates a new GetHealthzParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewGetHealthzParams() *GetHealthzParams {
+ return &GetHealthzParams{
+ timeout: cr.DefaultTimeout,
+ }
+}
+
+// NewGetHealthzParamsWithTimeout creates a new GetHealthzParams object
+// with the ability to set a timeout on a request.
+func NewGetHealthzParamsWithTimeout(timeout time.Duration) *GetHealthzParams {
+ return &GetHealthzParams{
+ timeout: timeout,
+ }
+}
+
+// NewGetHealthzParamsWithContext creates a new GetHealthzParams object
+// with the ability to set a context for a request.
+func NewGetHealthzParamsWithContext(ctx context.Context) *GetHealthzParams {
+ return &GetHealthzParams{
+ Context: ctx,
+ }
+}
+
+// NewGetHealthzParamsWithHTTPClient creates a new GetHealthzParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewGetHealthzParamsWithHTTPClient(client *http.Client) *GetHealthzParams {
+ return &GetHealthzParams{
+ HTTPClient: client,
+ }
+}
+
+/*
+GetHealthzParams contains all the parameters to send to the API endpoint
+
+ for the get healthz operation.
+
+ Typically these are written to a http.Request.
+*/
+type GetHealthzParams struct {
+ timeout time.Duration
+ Context context.Context
+ HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the get healthz params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetHealthzParams) WithDefaults() *GetHealthzParams {
+ o.SetDefaults()
+ return o
+}
+
+// SetDefaults hydrates default values in the get healthz params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *GetHealthzParams) SetDefaults() {
+ // no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the get healthz params
+func (o *GetHealthzParams) WithTimeout(timeout time.Duration) *GetHealthzParams {
+ o.SetTimeout(timeout)
+ return o
+}
+
+// SetTimeout adds the timeout to the get healthz params
+func (o *GetHealthzParams) SetTimeout(timeout time.Duration) {
+ o.timeout = timeout
+}
+
+// WithContext adds the context to the get healthz params
+func (o *GetHealthzParams) WithContext(ctx context.Context) *GetHealthzParams {
+ o.SetContext(ctx)
+ return o
+}
+
+// SetContext adds the context to the get healthz params
+func (o *GetHealthzParams) SetContext(ctx context.Context) {
+ o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the get healthz params
+func (o *GetHealthzParams) WithHTTPClient(client *http.Client) *GetHealthzParams {
+ o.SetHTTPClient(client)
+ return o
+}
+
+// SetHTTPClient adds the HTTPClient to the get healthz params
+func (o *GetHealthzParams) SetHTTPClient(client *http.Client) {
+ o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *GetHealthzParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+ if err := r.SetTimeout(o.timeout); err != nil {
+ return err
+ }
+ var res []error
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/health/client/restapi/get_healthz_responses.go b/vendor/github.com/cilium/cilium/api/v1/health/client/restapi/get_healthz_responses.go
new file mode 100644
index 000000000..c30a21186
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/health/client/restapi/get_healthz_responses.go
@@ -0,0 +1,168 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package restapi
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+
+ "github.com/cilium/cilium/api/v1/health/models"
+)
+
+// GetHealthzReader is a Reader for the GetHealthz structure.
+type GetHealthzReader struct {
+ formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *GetHealthzReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+ switch response.Code() {
+ case 200:
+ result := NewGetHealthzOK()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return result, nil
+ case 500:
+ result := NewGetHealthzFailed()
+ if err := result.readResponse(response, consumer, o.formats); err != nil {
+ return nil, err
+ }
+ return nil, result
+ default:
+ return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
+ }
+}
+
+// NewGetHealthzOK creates a GetHealthzOK with default headers values
+func NewGetHealthzOK() *GetHealthzOK {
+ return &GetHealthzOK{}
+}
+
+/*
+GetHealthzOK describes a response with status code 200, with default header values.
+
+Success
+*/
+type GetHealthzOK struct {
+ Payload *models.HealthResponse
+}
+
+// IsSuccess returns true when this get healthz o k response has a 2xx status code
+func (o *GetHealthzOK) IsSuccess() bool {
+ return true
+}
+
+// IsRedirect returns true when this get healthz o k response has a 3xx status code
+func (o *GetHealthzOK) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get healthz o k response has a 4xx status code
+func (o *GetHealthzOK) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get healthz o k response has a 5xx status code
+func (o *GetHealthzOK) IsServerError() bool {
+ return false
+}
+
+// IsCode returns true when this get healthz o k response a status code equal to that given
+func (o *GetHealthzOK) IsCode(code int) bool {
+ return code == 200
+}
+
+func (o *GetHealthzOK) Error() string {
+ return fmt.Sprintf("[GET /healthz][%d] getHealthzOK %+v", 200, o.Payload)
+}
+
+func (o *GetHealthzOK) String() string {
+ return fmt.Sprintf("[GET /healthz][%d] getHealthzOK %+v", 200, o.Payload)
+}
+
+func (o *GetHealthzOK) GetPayload() *models.HealthResponse {
+ return o.Payload
+}
+
+func (o *GetHealthzOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ o.Payload = new(models.HealthResponse)
+
+ // response payload
+ if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// NewGetHealthzFailed creates a GetHealthzFailed with default headers values
+func NewGetHealthzFailed() *GetHealthzFailed {
+ return &GetHealthzFailed{}
+}
+
+/*
+GetHealthzFailed describes a response with status code 500, with default header values.
+
+Failed to contact local Cilium daemon
+*/
+type GetHealthzFailed struct {
+ Payload models.Error
+}
+
+// IsSuccess returns true when this get healthz failed response has a 2xx status code
+func (o *GetHealthzFailed) IsSuccess() bool {
+ return false
+}
+
+// IsRedirect returns true when this get healthz failed response has a 3xx status code
+func (o *GetHealthzFailed) IsRedirect() bool {
+ return false
+}
+
+// IsClientError returns true when this get healthz failed response has a 4xx status code
+func (o *GetHealthzFailed) IsClientError() bool {
+ return false
+}
+
+// IsServerError returns true when this get healthz failed response has a 5xx status code
+func (o *GetHealthzFailed) IsServerError() bool {
+ return true
+}
+
+// IsCode returns true when this get healthz failed response a status code equal to that given
+func (o *GetHealthzFailed) IsCode(code int) bool {
+ return code == 500
+}
+
+func (o *GetHealthzFailed) Error() string {
+ return fmt.Sprintf("[GET /healthz][%d] getHealthzFailed %+v", 500, o.Payload)
+}
+
+func (o *GetHealthzFailed) String() string {
+ return fmt.Sprintf("[GET /healthz][%d] getHealthzFailed %+v", 500, o.Payload)
+}
+
+func (o *GetHealthzFailed) GetPayload() models.Error {
+ return o.Payload
+}
+
+func (o *GetHealthzFailed) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+ // response payload
+ if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/health/client/restapi/restapi_client.go b/vendor/github.com/cilium/cilium/api/v1/health/client/restapi/restapi_client.go
new file mode 100644
index 000000000..6e5b1614a
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/health/client/restapi/restapi_client.go
@@ -0,0 +1,87 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package restapi
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "fmt"
+
+ "github.com/go-openapi/runtime"
+ "github.com/go-openapi/strfmt"
+)
+
+// New creates a new restapi API client.
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService {
+ return &Client{transport: transport, formats: formats}
+}
+
+/*
+Client for restapi API
+*/
+type Client struct {
+ transport runtime.ClientTransport
+ formats strfmt.Registry
+}
+
+// ClientOption is the option for Client methods
+type ClientOption func(*runtime.ClientOperation)
+
+// ClientService is the interface for Client methods
+type ClientService interface {
+ GetHealthz(params *GetHealthzParams, opts ...ClientOption) (*GetHealthzOK, error)
+
+ SetTransport(transport runtime.ClientTransport)
+}
+
+/*
+ GetHealthz gets health of cilium node
+
+ Returns health and status information of the local node including
+
+load and uptime, as well as the status of related components including
+the Cilium daemon.
+*/
+func (a *Client) GetHealthz(params *GetHealthzParams, opts ...ClientOption) (*GetHealthzOK, error) {
+ // TODO: Validate the params before sending
+ if params == nil {
+ params = NewGetHealthzParams()
+ }
+ op := &runtime.ClientOperation{
+ ID: "GetHealthz",
+ Method: "GET",
+ PathPattern: "/healthz",
+ ProducesMediaTypes: []string{"application/json"},
+ ConsumesMediaTypes: []string{"application/json"},
+ Schemes: []string{"http"},
+ Params: params,
+ Reader: &GetHealthzReader{formats: a.formats},
+ Context: params.Context,
+ Client: params.HTTPClient,
+ }
+ for _, opt := range opts {
+ opt(op)
+ }
+
+ result, err := a.transport.Submit(op)
+ if err != nil {
+ return nil, err
+ }
+ success, ok := result.(*GetHealthzOK)
+ if ok {
+ return success, nil
+ }
+ // unexpected success response
+ // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue
+ msg := fmt.Sprintf("unexpected success response for GetHealthz: API contract not enforced by server. Client expected to get an error, but got: %T", result)
+ panic(msg)
+}
+
+// SetTransport changes the transport on the client
+func (a *Client) SetTransport(transport runtime.ClientTransport) {
+ a.transport = transport
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/health/models/connectivity_status.go b/vendor/github.com/cilium/cilium/api/v1/health/models/connectivity_status.go
new file mode 100644
index 000000000..123e3b416
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/health/models/connectivity_status.go
@@ -0,0 +1,56 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// ConnectivityStatus Connectivity status of a path
+//
+// swagger:model ConnectivityStatus
+type ConnectivityStatus struct {
+
+ // Round trip time to node in nanoseconds
+ Latency int64 `json:"latency,omitempty"`
+
+ // Human readable status/error/warning message
+ Status string `json:"status,omitempty"`
+}
+
+// Validate validates this connectivity status
+func (m *ConnectivityStatus) Validate(formats strfmt.Registry) error {
+ return nil
+}
+
+// ContextValidate validates this connectivity status based on context it is used
+func (m *ConnectivityStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *ConnectivityStatus) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *ConnectivityStatus) UnmarshalBinary(b []byte) error {
+ var res ConnectivityStatus
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/health/models/endpoint_status.go b/vendor/github.com/cilium/cilium/api/v1/health/models/endpoint_status.go
new file mode 100644
index 000000000..4b8c9daa2
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/health/models/endpoint_status.go
@@ -0,0 +1,165 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "strconv"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// EndpointStatus Connectivity status to host cilium-health endpoints via different paths
+//
+// swagger:model EndpointStatus
+type EndpointStatus struct {
+
+ // primary address
+ PrimaryAddress *PathStatus `json:"primary-address,omitempty"`
+
+ // secondary addresses
+ SecondaryAddresses []*PathStatus `json:"secondary-addresses"`
+}
+
+// Validate validates this endpoint status
+func (m *EndpointStatus) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validatePrimaryAddress(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateSecondaryAddresses(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *EndpointStatus) validatePrimaryAddress(formats strfmt.Registry) error {
+ if swag.IsZero(m.PrimaryAddress) { // not required
+ return nil
+ }
+
+ if m.PrimaryAddress != nil {
+ if err := m.PrimaryAddress.Validate(formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("primary-address")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("primary-address")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *EndpointStatus) validateSecondaryAddresses(formats strfmt.Registry) error {
+ if swag.IsZero(m.SecondaryAddresses) { // not required
+ return nil
+ }
+
+ for i := 0; i < len(m.SecondaryAddresses); i++ {
+ if swag.IsZero(m.SecondaryAddresses[i]) { // not required
+ continue
+ }
+
+ if m.SecondaryAddresses[i] != nil {
+ if err := m.SecondaryAddresses[i].Validate(formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("secondary-addresses" + "." + strconv.Itoa(i))
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("secondary-addresses" + "." + strconv.Itoa(i))
+ }
+ return err
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// ContextValidate validate this endpoint status based on the context it is used
+func (m *EndpointStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidatePrimaryAddress(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.contextValidateSecondaryAddresses(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *EndpointStatus) contextValidatePrimaryAddress(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.PrimaryAddress != nil {
+ if err := m.PrimaryAddress.ContextValidate(ctx, formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("primary-address")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("primary-address")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *EndpointStatus) contextValidateSecondaryAddresses(ctx context.Context, formats strfmt.Registry) error {
+
+ for i := 0; i < len(m.SecondaryAddresses); i++ {
+
+ if m.SecondaryAddresses[i] != nil {
+ if err := m.SecondaryAddresses[i].ContextValidate(ctx, formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("secondary-addresses" + "." + strconv.Itoa(i))
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("secondary-addresses" + "." + strconv.Itoa(i))
+ }
+ return err
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *EndpointStatus) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *EndpointStatus) UnmarshalBinary(b []byte) error {
+ var res EndpointStatus
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/health/models/error.go b/vendor/github.com/cilium/cilium/api/v1/health/models/error.go
new file mode 100644
index 000000000..83eccc860
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/health/models/error.go
@@ -0,0 +1,30 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+
+ "github.com/go-openapi/strfmt"
+)
+
+// Error error
+//
+// swagger:model error
+type Error string
+
+// Validate validates this error
+func (m Error) Validate(formats strfmt.Registry) error {
+ return nil
+}
+
+// ContextValidate validates this error based on context it is used
+func (m Error) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/health/models/health_response.go b/vendor/github.com/cilium/cilium/api/v1/health/models/health_response.go
new file mode 100644
index 000000000..d74c7044e
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/health/models/health_response.go
@@ -0,0 +1,153 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+
+ ciliumModels "github.com/cilium/cilium/api/v1/models"
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// HealthResponse Health and status information of local node
+//
+// swagger:model HealthResponse
+type HealthResponse struct {
+
+ // Status of Cilium daemon
+ Cilium ciliumModels.StatusResponse `json:"cilium,omitempty"`
+
+ // System load on node
+ SystemLoad *LoadResponse `json:"system-load,omitempty"`
+
+ // Uptime of cilium-health instance
+ Uptime string `json:"uptime,omitempty"`
+}
+
+// Validate validates this health response
+func (m *HealthResponse) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateCilium(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateSystemLoad(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *HealthResponse) validateCilium(formats strfmt.Registry) error {
+ if swag.IsZero(m.Cilium) { // not required
+ return nil
+ }
+
+ if err := m.Cilium.Validate(formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("cilium")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("cilium")
+ }
+ return err
+ }
+
+ return nil
+}
+
+func (m *HealthResponse) validateSystemLoad(formats strfmt.Registry) error {
+ if swag.IsZero(m.SystemLoad) { // not required
+ return nil
+ }
+
+ if m.SystemLoad != nil {
+ if err := m.SystemLoad.Validate(formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("system-load")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("system-load")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ContextValidate validate this health response based on the context it is used
+func (m *HealthResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidateCilium(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.contextValidateSystemLoad(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *HealthResponse) contextValidateCilium(ctx context.Context, formats strfmt.Registry) error {
+
+ if err := m.Cilium.ContextValidate(ctx, formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("cilium")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("cilium")
+ }
+ return err
+ }
+
+ return nil
+}
+
+func (m *HealthResponse) contextValidateSystemLoad(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.SystemLoad != nil {
+ if err := m.SystemLoad.ContextValidate(ctx, formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("system-load")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("system-load")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *HealthResponse) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *HealthResponse) UnmarshalBinary(b []byte) error {
+ var res HealthResponse
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/health/models/health_status_response.go b/vendor/github.com/cilium/cilium/api/v1/health/models/health_status_response.go
new file mode 100644
index 000000000..9f4a29d78
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/health/models/health_status_response.go
@@ -0,0 +1,168 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "strconv"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// HealthStatusResponse Connectivity status to other daemons
+//
+// swagger:model HealthStatusResponse
+type HealthStatusResponse struct {
+
+ // Description of the local node
+ Local *SelfStatus `json:"local,omitempty"`
+
+ // Connectivity status to each other node
+ Nodes []*NodeStatus `json:"nodes"`
+
+ // timestamp
+ Timestamp string `json:"timestamp,omitempty"`
+}
+
+// Validate validates this health status response
+func (m *HealthStatusResponse) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateLocal(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateNodes(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *HealthStatusResponse) validateLocal(formats strfmt.Registry) error {
+ if swag.IsZero(m.Local) { // not required
+ return nil
+ }
+
+ if m.Local != nil {
+ if err := m.Local.Validate(formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("local")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("local")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *HealthStatusResponse) validateNodes(formats strfmt.Registry) error {
+ if swag.IsZero(m.Nodes) { // not required
+ return nil
+ }
+
+ for i := 0; i < len(m.Nodes); i++ {
+ if swag.IsZero(m.Nodes[i]) { // not required
+ continue
+ }
+
+ if m.Nodes[i] != nil {
+ if err := m.Nodes[i].Validate(formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("nodes" + "." + strconv.Itoa(i))
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("nodes" + "." + strconv.Itoa(i))
+ }
+ return err
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// ContextValidate validate this health status response based on the context it is used
+func (m *HealthStatusResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidateLocal(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.contextValidateNodes(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *HealthStatusResponse) contextValidateLocal(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.Local != nil {
+ if err := m.Local.ContextValidate(ctx, formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("local")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("local")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *HealthStatusResponse) contextValidateNodes(ctx context.Context, formats strfmt.Registry) error {
+
+ for i := 0; i < len(m.Nodes); i++ {
+
+ if m.Nodes[i] != nil {
+ if err := m.Nodes[i].ContextValidate(ctx, formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("nodes" + "." + strconv.Itoa(i))
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("nodes" + "." + strconv.Itoa(i))
+ }
+ return err
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *HealthStatusResponse) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *HealthStatusResponse) UnmarshalBinary(b []byte) error {
+ var res HealthStatusResponse
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/health/models/host_status.go b/vendor/github.com/cilium/cilium/api/v1/health/models/host_status.go
new file mode 100644
index 000000000..da114cf19
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/health/models/host_status.go
@@ -0,0 +1,166 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "strconv"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// HostStatus Connectivity status to host cilium-health instance via different paths,
+// probing via all known IP addresses
+//
+// swagger:model HostStatus
+type HostStatus struct {
+
+ // primary address
+ PrimaryAddress *PathStatus `json:"primary-address,omitempty"`
+
+ // secondary addresses
+ SecondaryAddresses []*PathStatus `json:"secondary-addresses"`
+}
+
+// Validate validates this host status
+func (m *HostStatus) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validatePrimaryAddress(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateSecondaryAddresses(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *HostStatus) validatePrimaryAddress(formats strfmt.Registry) error {
+ if swag.IsZero(m.PrimaryAddress) { // not required
+ return nil
+ }
+
+ if m.PrimaryAddress != nil {
+ if err := m.PrimaryAddress.Validate(formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("primary-address")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("primary-address")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *HostStatus) validateSecondaryAddresses(formats strfmt.Registry) error {
+ if swag.IsZero(m.SecondaryAddresses) { // not required
+ return nil
+ }
+
+ for i := 0; i < len(m.SecondaryAddresses); i++ {
+ if swag.IsZero(m.SecondaryAddresses[i]) { // not required
+ continue
+ }
+
+ if m.SecondaryAddresses[i] != nil {
+ if err := m.SecondaryAddresses[i].Validate(formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("secondary-addresses" + "." + strconv.Itoa(i))
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("secondary-addresses" + "." + strconv.Itoa(i))
+ }
+ return err
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// ContextValidate validate this host status based on the context it is used
+func (m *HostStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidatePrimaryAddress(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.contextValidateSecondaryAddresses(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *HostStatus) contextValidatePrimaryAddress(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.PrimaryAddress != nil {
+ if err := m.PrimaryAddress.ContextValidate(ctx, formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("primary-address")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("primary-address")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *HostStatus) contextValidateSecondaryAddresses(ctx context.Context, formats strfmt.Registry) error {
+
+ for i := 0; i < len(m.SecondaryAddresses); i++ {
+
+ if m.SecondaryAddresses[i] != nil {
+ if err := m.SecondaryAddresses[i].ContextValidate(ctx, formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("secondary-addresses" + "." + strconv.Itoa(i))
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("secondary-addresses" + "." + strconv.Itoa(i))
+ }
+ return err
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *HostStatus) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *HostStatus) UnmarshalBinary(b []byte) error {
+ var res HostStatus
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/health/models/load_response.go b/vendor/github.com/cilium/cilium/api/v1/health/models/load_response.go
new file mode 100644
index 000000000..d0ceddba7
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/health/models/load_response.go
@@ -0,0 +1,59 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// LoadResponse System load on node
+//
+// swagger:model LoadResponse
+type LoadResponse struct {
+
+ // Load average over the past 15 minutes
+ Last15min string `json:"last15min,omitempty"`
+
+ // Load average over the past minute
+ Last1min string `json:"last1min,omitempty"`
+
+ // Load average over the past 5 minutes
+ Last5min string `json:"last5min,omitempty"`
+}
+
+// Validate validates this load response
+func (m *LoadResponse) Validate(formats strfmt.Registry) error {
+ return nil
+}
+
+// ContextValidate validates this load response based on context it is used
+func (m *LoadResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *LoadResponse) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *LoadResponse) UnmarshalBinary(b []byte) error {
+ var res LoadResponse
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/health/models/node_status.go b/vendor/github.com/cilium/cilium/api/v1/health/models/node_status.go
new file mode 100644
index 000000000..1bbd99533
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/health/models/node_status.go
@@ -0,0 +1,204 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// NodeStatus Connectivity status of a remote cilium-health instance
+//
+// swagger:model NodeStatus
+type NodeStatus struct {
+
+ // DEPRECATED: Please use the health-endpoint field instead, which
+ // supports reporting the status of different addresses for the endpoint
+ //
+ Endpoint *PathStatus `json:"endpoint,omitempty"`
+
+ // Connectivity status to simulated endpoint on the node
+ HealthEndpoint *EndpointStatus `json:"health-endpoint,omitempty"`
+
+ // Connectivity status to cilium-health instance on node IP
+ Host *HostStatus `json:"host,omitempty"`
+
+ // Identifying name for the node
+ Name string `json:"name,omitempty"`
+}
+
+// Validate validates this node status
+func (m *NodeStatus) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateEndpoint(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateHealthEndpoint(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateHost(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *NodeStatus) validateEndpoint(formats strfmt.Registry) error {
+ if swag.IsZero(m.Endpoint) { // not required
+ return nil
+ }
+
+ if m.Endpoint != nil {
+ if err := m.Endpoint.Validate(formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("endpoint")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("endpoint")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *NodeStatus) validateHealthEndpoint(formats strfmt.Registry) error {
+ if swag.IsZero(m.HealthEndpoint) { // not required
+ return nil
+ }
+
+ if m.HealthEndpoint != nil {
+ if err := m.HealthEndpoint.Validate(formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("health-endpoint")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("health-endpoint")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *NodeStatus) validateHost(formats strfmt.Registry) error {
+ if swag.IsZero(m.Host) { // not required
+ return nil
+ }
+
+ if m.Host != nil {
+ if err := m.Host.Validate(formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("host")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("host")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ContextValidate validate this node status based on the context it is used
+func (m *NodeStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidateEndpoint(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.contextValidateHealthEndpoint(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.contextValidateHost(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *NodeStatus) contextValidateEndpoint(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.Endpoint != nil {
+ if err := m.Endpoint.ContextValidate(ctx, formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("endpoint")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("endpoint")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *NodeStatus) contextValidateHealthEndpoint(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.HealthEndpoint != nil {
+ if err := m.HealthEndpoint.ContextValidate(ctx, formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("health-endpoint")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("health-endpoint")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *NodeStatus) contextValidateHost(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.Host != nil {
+ if err := m.Host.ContextValidate(ctx, formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("host")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("host")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *NodeStatus) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *NodeStatus) UnmarshalBinary(b []byte) error {
+ var res NodeStatus
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/health/models/path_status.go b/vendor/github.com/cilium/cilium/api/v1/health/models/path_status.go
new file mode 100644
index 000000000..3a8669f3c
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/health/models/path_status.go
@@ -0,0 +1,157 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// PathStatus Connectivity status via different paths, for example using different
+// policies or service redirection
+//
+// swagger:model PathStatus
+type PathStatus struct {
+
+ // Connectivity status without policy applied
+ HTTP *ConnectivityStatus `json:"http,omitempty"`
+
+ // Basic ping connectivity status to node IP
+ Icmp *ConnectivityStatus `json:"icmp,omitempty"`
+
+ // IP address queried for the connectivity status
+ IP string `json:"ip,omitempty"`
+}
+
+// Validate validates this path status
+func (m *PathStatus) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateHTTP(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateIcmp(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *PathStatus) validateHTTP(formats strfmt.Registry) error {
+ if swag.IsZero(m.HTTP) { // not required
+ return nil
+ }
+
+ if m.HTTP != nil {
+ if err := m.HTTP.Validate(formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("http")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("http")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *PathStatus) validateIcmp(formats strfmt.Registry) error {
+ if swag.IsZero(m.Icmp) { // not required
+ return nil
+ }
+
+ if m.Icmp != nil {
+ if err := m.Icmp.Validate(formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("icmp")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("icmp")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ContextValidate validate this path status based on the context it is used
+func (m *PathStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidateHTTP(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.contextValidateIcmp(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *PathStatus) contextValidateHTTP(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.HTTP != nil {
+ if err := m.HTTP.ContextValidate(ctx, formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("http")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("http")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *PathStatus) contextValidateIcmp(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.Icmp != nil {
+ if err := m.Icmp.ContextValidate(ctx, formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("icmp")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("icmp")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *PathStatus) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *PathStatus) UnmarshalBinary(b []byte) error {
+ var res PathStatus
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/health/models/self_status.go b/vendor/github.com/cilium/cilium/api/v1/health/models/self_status.go
new file mode 100644
index 000000000..0f8609367
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/health/models/self_status.go
@@ -0,0 +1,53 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// SelfStatus Description of the cilium-health node
+//
+// swagger:model SelfStatus
+type SelfStatus struct {
+
+ // Name associated with this node
+ Name string `json:"name,omitempty"`
+}
+
+// Validate validates this self status
+func (m *SelfStatus) Validate(formats strfmt.Registry) error {
+ return nil
+}
+
+// ContextValidate validates this self status based on context it is used
+func (m *SelfStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *SelfStatus) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *SelfStatus) UnmarshalBinary(b []byte) error {
+ var res SelfStatus
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/models/bgp_family.go b/vendor/github.com/cilium/cilium/api/v1/models/bgp_family.go
new file mode 100644
index 000000000..5093e85ef
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/models/bgp_family.go
@@ -0,0 +1,56 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// BgpFamily Address Family Indicator (AFI) and Subsequent Address Family Indicator (SAFI) of the path
+//
+// swagger:model BgpFamily
+type BgpFamily struct {
+
+ // Address Family Indicator (AFI) of the path
+ Afi string `json:"afi,omitempty"`
+
+ // Subsequent Address Family Indicator (SAFI) of the path
+ Safi string `json:"safi,omitempty"`
+}
+
+// Validate validates this bgp family
+func (m *BgpFamily) Validate(formats strfmt.Registry) error {
+ return nil
+}
+
+// ContextValidate validates this bgp family based on context it is used
+func (m *BgpFamily) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *BgpFamily) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *BgpFamily) UnmarshalBinary(b []byte) error {
+ var res BgpFamily
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/models/bgp_nlri.go b/vendor/github.com/cilium/cilium/api/v1/models/bgp_nlri.go
new file mode 100644
index 000000000..a6455398f
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/models/bgp_nlri.go
@@ -0,0 +1,53 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// BgpNlri Network Layer Reachability Information (NLRI) of the path
+//
+// swagger:model BgpNlri
+type BgpNlri struct {
+
+ // Base64-encoded NLRI in the BGP UPDATE message format
+ Base64 string `json:"base64,omitempty"`
+}
+
+// Validate validates this bgp nlri
+func (m *BgpNlri) Validate(formats strfmt.Registry) error {
+ return nil
+}
+
+// ContextValidate validates this bgp nlri based on context it is used
+func (m *BgpNlri) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *BgpNlri) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *BgpNlri) UnmarshalBinary(b []byte) error {
+ var res BgpNlri
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/models/bgp_path.go b/vendor/github.com/cilium/cilium/api/v1/models/bgp_path.go
new file mode 100644
index 000000000..6a8f49bef
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/models/bgp_path.go
@@ -0,0 +1,220 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "strconv"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// BgpPath Single BGP routing Path containing BGP Network Layer Reachability Information (NLRI) and path attributes
+//
+// swagger:model BgpPath
+type BgpPath struct {
+
+ // Age of the path (time since its creation) in nanoseconds
+ AgeNanoseconds int64 `json:"age-nanoseconds,omitempty"`
+
+ // True value flags the best path towards the destination prefix
+ Best bool `json:"best,omitempty"`
+
+ // Address Family Indicator (AFI) and Subsequent Address Family Indicator (SAFI) of the path
+ Family *BgpFamily `json:"family,omitempty"`
+
+ // Network Layer Reachability Information of the path
+ Nlri *BgpNlri `json:"nlri,omitempty"`
+
+ // List of BGP path attributes specific for the path
+ PathAttributes []*BgpPathAttribute `json:"path-attributes"`
+
+ // True value marks the path as stale
+ Stale bool `json:"stale,omitempty"`
+}
+
+// Validate validates this bgp path
+func (m *BgpPath) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateFamily(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validateNlri(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.validatePathAttributes(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *BgpPath) validateFamily(formats strfmt.Registry) error {
+ if swag.IsZero(m.Family) { // not required
+ return nil
+ }
+
+ if m.Family != nil {
+ if err := m.Family.Validate(formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("family")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("family")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *BgpPath) validateNlri(formats strfmt.Registry) error {
+ if swag.IsZero(m.Nlri) { // not required
+ return nil
+ }
+
+ if m.Nlri != nil {
+ if err := m.Nlri.Validate(formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("nlri")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("nlri")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *BgpPath) validatePathAttributes(formats strfmt.Registry) error {
+ if swag.IsZero(m.PathAttributes) { // not required
+ return nil
+ }
+
+ for i := 0; i < len(m.PathAttributes); i++ {
+ if swag.IsZero(m.PathAttributes[i]) { // not required
+ continue
+ }
+
+ if m.PathAttributes[i] != nil {
+ if err := m.PathAttributes[i].Validate(formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("path-attributes" + "." + strconv.Itoa(i))
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("path-attributes" + "." + strconv.Itoa(i))
+ }
+ return err
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// ContextValidate validate this bgp path based on the context it is used
+func (m *BgpPath) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidateFamily(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.contextValidateNlri(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.contextValidatePathAttributes(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *BgpPath) contextValidateFamily(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.Family != nil {
+ if err := m.Family.ContextValidate(ctx, formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("family")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("family")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *BgpPath) contextValidateNlri(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.Nlri != nil {
+ if err := m.Nlri.ContextValidate(ctx, formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("nlri")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("nlri")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *BgpPath) contextValidatePathAttributes(ctx context.Context, formats strfmt.Registry) error {
+
+ for i := 0; i < len(m.PathAttributes); i++ {
+
+ if m.PathAttributes[i] != nil {
+ if err := m.PathAttributes[i].ContextValidate(ctx, formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("path-attributes" + "." + strconv.Itoa(i))
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("path-attributes" + "." + strconv.Itoa(i))
+ }
+ return err
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *BgpPath) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *BgpPath) UnmarshalBinary(b []byte) error {
+ var res BgpPath
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/models/bgp_path_attribute.go b/vendor/github.com/cilium/cilium/api/v1/models/bgp_path_attribute.go
new file mode 100644
index 000000000..cd9292947
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/models/bgp_path_attribute.go
@@ -0,0 +1,53 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// BgpPathAttribute Single BGP path attribute specific for the path
+//
+// swagger:model BgpPathAttribute
+type BgpPathAttribute struct {
+
+ // Base64-encoded BGP path attribute in the BGP UPDATE message format
+ Base64 string `json:"base64,omitempty"`
+}
+
+// Validate validates this bgp path attribute
+func (m *BgpPathAttribute) Validate(formats strfmt.Registry) error {
+ return nil
+}
+
+// ContextValidate validates this bgp path attribute based on context it is used
+func (m *BgpPathAttribute) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *BgpPathAttribute) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *BgpPathAttribute) UnmarshalBinary(b []byte) error {
+ var res BgpPathAttribute
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/models/bgp_peer.go b/vendor/github.com/cilium/cilium/api/v1/models/bgp_peer.go
index 7b22ce050..59a63154c 100644
--- a/vendor/github.com/cilium/cilium/api/v1/models/bgp_peer.go
+++ b/vendor/github.com/cilium/cilium/api/v1/models/bgp_peer.go
@@ -78,6 +78,9 @@ type BgpPeer struct {
//
SessionState string `json:"session-state,omitempty"`
+ // Set when a TCP password is configured for communications with this peer
+ TCPPasswordEnabled bool `json:"tcp-password-enabled,omitempty"`
+
// BGP peer connection uptime in nano seconds.
UptimeNanoseconds int64 `json:"uptime-nanoseconds,omitempty"`
}
diff --git a/vendor/github.com/cilium/cilium/api/v1/models/bgp_route.go b/vendor/github.com/cilium/cilium/api/v1/models/bgp_route.go
new file mode 100644
index 000000000..62b381e0a
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/models/bgp_route.go
@@ -0,0 +1,125 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "strconv"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// BgpRoute Single BGP route retrieved from the RIB of underlying router
+//
+// swagger:model BgpRoute
+type BgpRoute struct {
+
+ // List of routing paths leading towards the prefix
+ Paths []*BgpPath `json:"paths"`
+
+ // IP prefix of the route
+ Prefix string `json:"prefix,omitempty"`
+
+ // Autonomous System Number (ASN) identifying a BGP virtual router instance
+ RouterAsn int64 `json:"router-asn,omitempty"`
+}
+
+// Validate validates this bgp route
+func (m *BgpRoute) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validatePaths(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *BgpRoute) validatePaths(formats strfmt.Registry) error {
+ if swag.IsZero(m.Paths) { // not required
+ return nil
+ }
+
+ for i := 0; i < len(m.Paths); i++ {
+ if swag.IsZero(m.Paths[i]) { // not required
+ continue
+ }
+
+ if m.Paths[i] != nil {
+ if err := m.Paths[i].Validate(formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("paths" + "." + strconv.Itoa(i))
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("paths" + "." + strconv.Itoa(i))
+ }
+ return err
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// ContextValidate validate this bgp route based on the context it is used
+func (m *BgpRoute) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidatePaths(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *BgpRoute) contextValidatePaths(ctx context.Context, formats strfmt.Registry) error {
+
+ for i := 0; i < len(m.Paths); i++ {
+
+ if m.Paths[i] != nil {
+ if err := m.Paths[i].ContextValidate(ctx, formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("paths" + "." + strconv.Itoa(i))
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("paths" + "." + strconv.Itoa(i))
+ }
+ return err
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *BgpRoute) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *BgpRoute) UnmarshalBinary(b []byte) error {
+ var res BgpRoute
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/models/debug_info.go b/vendor/github.com/cilium/cilium/api/v1/models/debug_info.go
index 128dd8dd6..db539d021 100644
--- a/vendor/github.com/cilium/cilium/api/v1/models/debug_info.go
+++ b/vendor/github.com/cilium/cilium/api/v1/models/debug_info.go
@@ -336,7 +336,7 @@ func (m *DebugInfo) UnmarshalBinary(b []byte) error {
// swagger:model DebugInfoEncryption
type DebugInfoEncryption struct {
- // Status of the Wireguard agent
+ // Status of the WireGuard agent
Wireguard *WireguardStatus `json:"wireguard,omitempty"`
}
diff --git a/vendor/github.com/cilium/cilium/api/v1/models/encryption_status.go b/vendor/github.com/cilium/cilium/api/v1/models/encryption_status.go
index 4392fb097..286f0218a 100644
--- a/vendor/github.com/cilium/cilium/api/v1/models/encryption_status.go
+++ b/vendor/github.com/cilium/cilium/api/v1/models/encryption_status.go
@@ -32,7 +32,7 @@ type EncryptionStatus struct {
// Human readable status/error/warning message
Msg string `json:"msg,omitempty"`
- // Status of the Wireguard agent
+ // Status of the WireGuard agent
Wireguard *WireguardStatus `json:"wireguard,omitempty"`
}
diff --git a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_batch_delete_request.go b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_batch_delete_request.go
new file mode 100644
index 000000000..795e79d6b
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_batch_delete_request.go
@@ -0,0 +1,53 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// EndpointBatchDeleteRequest Properties selecting a batch of endpoints to delete.
+//
+// swagger:model EndpointBatchDeleteRequest
+type EndpointBatchDeleteRequest struct {
+
+ // ID assigned by container runtime
+ ContainerID string `json:"container-id,omitempty"`
+}
+
+// Validate validates this endpoint batch delete request
+func (m *EndpointBatchDeleteRequest) Validate(formats strfmt.Registry) error {
+ return nil
+}
+
+// ContextValidate validates this endpoint batch delete request based on context it is used
+func (m *EndpointBatchDeleteRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *EndpointBatchDeleteRequest) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *EndpointBatchDeleteRequest) UnmarshalBinary(b []byte) error {
+ var res EndpointBatchDeleteRequest
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_change_request.go b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_change_request.go
index f70e9e43f..e1be73caa 100644
--- a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_change_request.go
+++ b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_change_request.go
@@ -28,6 +28,9 @@ type EndpointChangeRequest struct {
// ID assigned by container runtime
ContainerID string `json:"container-id,omitempty"`
+ // Name of network device in container netns
+ ContainerInterfaceName string `json:"container-interface-name,omitempty"`
+
// Name assigned to container
ContainerName string `json:"container-name,omitempty"`
@@ -37,6 +40,9 @@ type EndpointChangeRequest struct {
// ID of datapath tail call map
DatapathMapID int64 `json:"datapath-map-id,omitempty"`
+ // Disables lookup using legacy endpoint identifiers (container name, container id, pod name) for this endpoint
+ DisableLegacyIdentifiers bool `json:"disable-legacy-identifiers,omitempty"`
+
// Docker endpoint ID
DockerEndpointID string `json:"docker-endpoint-id,omitempty"`
@@ -49,10 +55,10 @@ type EndpointChangeRequest struct {
// Local endpoint ID
ID int64 `json:"id,omitempty"`
- // Index of network device
+ // Index of network device in host netns
InterfaceIndex int64 `json:"interface-index,omitempty"`
- // Name of network device
+ // Name of network device in host netns
InterfaceName string `json:"interface-name,omitempty"`
// Kubernetes namespace name
diff --git a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_identifiers.go b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_identifiers.go
index 09a26aa38..380d26784 100644
--- a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_identifiers.go
+++ b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_identifiers.go
@@ -22,10 +22,13 @@ import (
// swagger:model EndpointIdentifiers
type EndpointIdentifiers struct {
- // ID assigned by container runtime
+ // ID assigned to this attachment by container runtime
+ CniAttachmentID string `json:"cni-attachment-id,omitempty"`
+
+ // ID assigned by container runtime (deprecated, may not be unique)
ContainerID string `json:"container-id,omitempty"`
- // Name assigned to container
+ // Name assigned to container (deprecated, may not be unique)
ContainerName string `json:"container-name,omitempty"`
// Docker endpoint ID
@@ -34,13 +37,13 @@ type EndpointIdentifiers struct {
// Docker network ID
DockerNetworkID string `json:"docker-network-id,omitempty"`
- // K8s namespace for this endpoint
+ // K8s namespace for this endpoint (deprecated, may not be unique)
K8sNamespace string `json:"k8s-namespace,omitempty"`
- // K8s pod name for this endpoint
+ // K8s pod name for this endpoint (deprecated, may not be unique)
K8sPodName string `json:"k8s-pod-name,omitempty"`
- // K8s pod for this endpoint(Deprecated, use K8sPodName and K8sNamespace instead)
+ // K8s pod for this endpoint (deprecated, may not be unique)
PodName string `json:"pod-name,omitempty"`
}
diff --git a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_networking.go b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_networking.go
index d322ca639..893edd301 100644
--- a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_networking.go
+++ b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_networking.go
@@ -25,16 +25,19 @@ type EndpointNetworking struct {
// IP4/6 addresses assigned to this Endpoint
Addressing []*AddressPair `json:"addressing"`
+ // Name of network device in container netns
+ ContainerInterfaceName string `json:"container-interface-name,omitempty"`
+
// host addressing
HostAddressing *NodeAddressing `json:"host-addressing,omitempty"`
// MAC address
HostMac string `json:"host-mac,omitempty"`
- // Index of network device
+ // Index of network device in host netns
InterfaceIndex int64 `json:"interface-index,omitempty"`
- // Name of network device
+ // Name of network device in host netns
InterfaceName string `json:"interface-name,omitempty"`
// MAC address
diff --git a/vendor/github.com/cilium/cilium/api/v1/models/module_health.go b/vendor/github.com/cilium/cilium/api/v1/models/module_health.go
new file mode 100644
index 000000000..7fd6ad30e
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/models/module_health.go
@@ -0,0 +1,65 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// ModuleHealth Report module health status
+//
+// swagger:model ModuleHealth
+type ModuleHealth struct {
+
+ // Time at which the last OK check occurred
+ LastOk string `json:"last-ok,omitempty"`
+
+ // Time of last health update
+ LastUpdated string `json:"last-updated,omitempty"`
+
+ // Describes the health status level
+ Level string `json:"level,omitempty"`
+
+ // Reports the associated health message
+ Message string `json:"message,omitempty"`
+
+	// Describes the module identifier
+ ModuleID string `json:"module-id,omitempty"`
+}
+
+// Validate validates this module health
+func (m *ModuleHealth) Validate(formats strfmt.Registry) error {
+ return nil
+}
+
+// ContextValidate validates this module health based on context it is used
+func (m *ModuleHealth) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *ModuleHealth) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *ModuleHealth) UnmarshalBinary(b []byte) error {
+ var res ModuleHealth
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/models/modules_health.go b/vendor/github.com/cilium/cilium/api/v1/models/modules_health.go
new file mode 100644
index 000000000..de7d224a1
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/models/modules_health.go
@@ -0,0 +1,119 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+ "strconv"
+
+ "github.com/go-openapi/errors"
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// ModulesHealth Reports health status of agent's modules
+//
+// swagger:model ModulesHealth
+type ModulesHealth struct {
+
+ // List out modules health status
+ Modules []*ModuleHealth `json:"modules"`
+}
+
+// Validate validates this modules health
+func (m *ModulesHealth) Validate(formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.validateModules(formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *ModulesHealth) validateModules(formats strfmt.Registry) error {
+ if swag.IsZero(m.Modules) { // not required
+ return nil
+ }
+
+ for i := 0; i < len(m.Modules); i++ {
+ if swag.IsZero(m.Modules[i]) { // not required
+ continue
+ }
+
+ if m.Modules[i] != nil {
+ if err := m.Modules[i].Validate(formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("modules" + "." + strconv.Itoa(i))
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("modules" + "." + strconv.Itoa(i))
+ }
+ return err
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// ContextValidate validate this modules health based on the context it is used
+func (m *ModulesHealth) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidateModules(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *ModulesHealth) contextValidateModules(ctx context.Context, formats strfmt.Registry) error {
+
+ for i := 0; i < len(m.Modules); i++ {
+
+ if m.Modules[i] != nil {
+ if err := m.Modules[i].ContextValidate(ctx, formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("modules" + "." + strconv.Itoa(i))
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("modules" + "." + strconv.Itoa(i))
+ }
+ return err
+ }
+ }
+
+ }
+
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *ModulesHealth) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *ModulesHealth) UnmarshalBinary(b []byte) error {
+ var res ModulesHealth
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/models/remote_cluster.go b/vendor/github.com/cilium/cilium/api/v1/models/remote_cluster.go
index cd092aa01..a8d8c2be9 100644
--- a/vendor/github.com/cilium/cilium/api/v1/models/remote_cluster.go
+++ b/vendor/github.com/cilium/cilium/api/v1/models/remote_cluster.go
@@ -24,6 +24,12 @@ import (
// swagger:model RemoteCluster
type RemoteCluster struct {
+ // Cluster configuration exposed by the remote cluster
+ Config *RemoteClusterConfig `json:"config,omitempty"`
+
+ // Indicates whether the connection to the remote kvstore is established
+ Connected bool `json:"connected,omitempty"`
+
// Time of last failure that occurred while attempting to reach the cluster
// Format: date-time
LastFailure strfmt.DateTime `json:"last-failure,omitempty"`
@@ -31,6 +37,9 @@ type RemoteCluster struct {
// Name of the cluster
Name string `json:"name,omitempty"`
+ // Number of endpoints in the cluster
+ NumEndpoints int64 `json:"num-endpoints,omitempty"`
+
// Number of failures reaching the cluster
NumFailures int64 `json:"num-failures,omitempty"`
@@ -43,27 +52,57 @@ type RemoteCluster struct {
// Number of services in the cluster
NumSharedServices int64 `json:"num-shared-services,omitempty"`
- // Indicates readiness of the remote cluser
+ // Indicates readiness of the remote cluster
Ready bool `json:"ready,omitempty"`
// Status of the control plane
Status string `json:"status,omitempty"`
+
+ // Synchronization status about each resource type
+ Synced *RemoteClusterSynced `json:"synced,omitempty"`
}
// Validate validates this remote cluster
func (m *RemoteCluster) Validate(formats strfmt.Registry) error {
var res []error
+ if err := m.validateConfig(formats); err != nil {
+ res = append(res, err)
+ }
+
if err := m.validateLastFailure(formats); err != nil {
res = append(res, err)
}
+ if err := m.validateSynced(formats); err != nil {
+ res = append(res, err)
+ }
+
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
+func (m *RemoteCluster) validateConfig(formats strfmt.Registry) error {
+ if swag.IsZero(m.Config) { // not required
+ return nil
+ }
+
+ if m.Config != nil {
+ if err := m.Config.Validate(formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("config")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("config")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
func (m *RemoteCluster) validateLastFailure(formats strfmt.Registry) error {
if swag.IsZero(m.LastFailure) { // not required
return nil
@@ -76,8 +115,72 @@ func (m *RemoteCluster) validateLastFailure(formats strfmt.Registry) error {
return nil
}
-// ContextValidate validates this remote cluster based on context it is used
+func (m *RemoteCluster) validateSynced(formats strfmt.Registry) error {
+ if swag.IsZero(m.Synced) { // not required
+ return nil
+ }
+
+ if m.Synced != nil {
+ if err := m.Synced.Validate(formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("synced")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("synced")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ContextValidate validate this remote cluster based on the context it is used
func (m *RemoteCluster) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ var res []error
+
+ if err := m.contextValidateConfig(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if err := m.contextValidateSynced(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
+ if len(res) > 0 {
+ return errors.CompositeValidationError(res...)
+ }
+ return nil
+}
+
+func (m *RemoteCluster) contextValidateConfig(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.Config != nil {
+ if err := m.Config.ContextValidate(ctx, formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("config")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("config")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *RemoteCluster) contextValidateSynced(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.Synced != nil {
+ if err := m.Synced.ContextValidate(ctx, formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("synced")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("synced")
+ }
+ return err
+ }
+ }
+
return nil
}
diff --git a/vendor/github.com/cilium/cilium/api/v1/models/remote_cluster_config.go b/vendor/github.com/cilium/cilium/api/v1/models/remote_cluster_config.go
new file mode 100644
index 000000000..0b2ae22dd
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/models/remote_cluster_config.go
@@ -0,0 +1,67 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// RemoteClusterConfig Cluster configuration exposed by the remote cluster
+//
+// +k8s:deepcopy-gen=true
+//
+// swagger:model RemoteClusterConfig
+type RemoteClusterConfig struct {
+
+ // The Cluster ID advertised by the remote cluster
+ ClusterID int64 `json:"cluster-id,omitempty"`
+
+ // Whether the remote cluster information is locally cached by kvstoremesh
+ Kvstoremesh bool `json:"kvstoremesh,omitempty"`
+
+ // Whether the configuration is required to be present
+ Required bool `json:"required,omitempty"`
+
+ // Whether the configuration has been correctly retrieved
+ Retrieved bool `json:"retrieved,omitempty"`
+
+ // Whether the remote cluster supports per-prefix "synced" canaries
+ SyncCanaries bool `json:"sync-canaries,omitempty"`
+}
+
+// Validate validates this remote cluster config
+func (m *RemoteClusterConfig) Validate(formats strfmt.Registry) error {
+ return nil
+}
+
+// ContextValidate validates this remote cluster config based on context it is used
+func (m *RemoteClusterConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *RemoteClusterConfig) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *RemoteClusterConfig) UnmarshalBinary(b []byte) error {
+ var res RemoteClusterConfig
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/models/remote_cluster_synced.go b/vendor/github.com/cilium/cilium/api/v1/models/remote_cluster_synced.go
new file mode 100644
index 000000000..8c1151ba3
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/api/v1/models/remote_cluster_synced.go
@@ -0,0 +1,67 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+// Copyright Authors of Cilium
+// SPDX-License-Identifier: Apache-2.0
+
+package models
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+ "context"
+
+ "github.com/go-openapi/strfmt"
+ "github.com/go-openapi/swag"
+)
+
+// RemoteClusterSynced Status of the synchronization with the remote cluster, about each resource
+// type. A given resource is considered to be synchronized if the initial
+// list of entries has been completely received from the remote cluster, and
+// new events are currently being watched.
+//
+// +k8s:deepcopy-gen=true
+//
+// swagger:model RemoteClusterSynced
+type RemoteClusterSynced struct {
+
+ // Endpoints synchronization status
+ Endpoints bool `json:"endpoints,omitempty"`
+
+ // Identities synchronization status
+ Identities bool `json:"identities,omitempty"`
+
+ // Nodes synchronization status
+ Nodes bool `json:"nodes,omitempty"`
+
+ // Services synchronization status
+ Services bool `json:"services,omitempty"`
+}
+
+// Validate validates this remote cluster synced
+func (m *RemoteClusterSynced) Validate(formats strfmt.Registry) error {
+ return nil
+}
+
+// ContextValidate validates this remote cluster synced based on context it is used
+func (m *RemoteClusterSynced) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+ return nil
+}
+
+// MarshalBinary interface implementation
+func (m *RemoteClusterSynced) MarshalBinary() ([]byte, error) {
+ if m == nil {
+ return nil, nil
+ }
+ return swag.WriteJSON(m)
+}
+
+// UnmarshalBinary interface implementation
+func (m *RemoteClusterSynced) UnmarshalBinary(b []byte) error {
+ var res RemoteClusterSynced
+ if err := swag.ReadJSON(b, &res); err != nil {
+ return err
+ }
+ *m = res
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/api/v1/models/selector_identity_mapping.go b/vendor/github.com/cilium/cilium/api/v1/models/selector_identity_mapping.go
index 400dfc334..0a62efa4b 100644
--- a/vendor/github.com/cilium/cilium/api/v1/models/selector_identity_mapping.go
+++ b/vendor/github.com/cilium/cilium/api/v1/models/selector_identity_mapping.go
@@ -23,6 +23,9 @@ type SelectorIdentityMapping struct {
// identities mapping to this selector
Identities []int64 `json:"identities"`
+ // Labels are the metadata labels associated with the selector
+ Labels interface{} `json:"labels,omitempty"`
+
// string form of selector
Selector string `json:"selector,omitempty"`
diff --git a/vendor/github.com/cilium/cilium/api/v1/models/status_response.go b/vendor/github.com/cilium/cilium/api/v1/models/status_response.go
index 1073dd527..d661530eb 100644
--- a/vendor/github.com/cilium/cilium/api/v1/models/status_response.go
+++ b/vendor/github.com/cilium/cilium/api/v1/models/status_response.go
@@ -24,6 +24,9 @@ import (
// swagger:model StatusResponse
type StatusResponse struct {
+ // Status of Mutual Authentication certificate provider
+ AuthCertificateProvider *Status `json:"auth-certificate-provider,omitempty"`
+
// Status of bandwidth manager
BandwidthManager *BandwidthManager `json:"bandwidth-manager,omitempty"`
@@ -110,6 +113,10 @@ type StatusResponse struct {
func (m *StatusResponse) Validate(formats strfmt.Registry) error {
var res []error
+ if err := m.validateAuthCertificateProvider(formats); err != nil {
+ res = append(res, err)
+ }
+
if err := m.validateBandwidthManager(formats); err != nil {
res = append(res, err)
}
@@ -216,6 +223,25 @@ func (m *StatusResponse) Validate(formats strfmt.Registry) error {
return nil
}
+func (m *StatusResponse) validateAuthCertificateProvider(formats strfmt.Registry) error {
+ if swag.IsZero(m.AuthCertificateProvider) { // not required
+ return nil
+ }
+
+ if m.AuthCertificateProvider != nil {
+ if err := m.AuthCertificateProvider.Validate(formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("auth-certificate-provider")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("auth-certificate-provider")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
func (m *StatusResponse) validateBandwidthManager(formats strfmt.Registry) error {
if swag.IsZero(m.BandwidthManager) { // not required
return nil
@@ -690,6 +716,10 @@ func (m *StatusResponse) validateStale(formats strfmt.Registry) error {
func (m *StatusResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
var res []error
+ if err := m.contextValidateAuthCertificateProvider(ctx, formats); err != nil {
+ res = append(res, err)
+ }
+
if err := m.contextValidateBandwidthManager(ctx, formats); err != nil {
res = append(res, err)
}
@@ -792,6 +822,22 @@ func (m *StatusResponse) ContextValidate(ctx context.Context, formats strfmt.Reg
return nil
}
+func (m *StatusResponse) contextValidateAuthCertificateProvider(ctx context.Context, formats strfmt.Registry) error {
+
+ if m.AuthCertificateProvider != nil {
+ if err := m.AuthCertificateProvider.ContextValidate(ctx, formats); err != nil {
+ if ve, ok := err.(*errors.Validation); ok {
+ return ve.ValidateName("auth-certificate-provider")
+ } else if ce, ok := err.(*errors.CompositeError); ok {
+ return ce.ValidateName("auth-certificate-provider")
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
func (m *StatusResponse) contextValidateBandwidthManager(ctx context.Context, formats strfmt.Registry) error {
if m.BandwidthManager != nil {
diff --git a/vendor/github.com/cilium/cilium/api/v1/models/wireguard_interface.go b/vendor/github.com/cilium/cilium/api/v1/models/wireguard_interface.go
index f73b640ad..d78acc53d 100644
--- a/vendor/github.com/cilium/cilium/api/v1/models/wireguard_interface.go
+++ b/vendor/github.com/cilium/cilium/api/v1/models/wireguard_interface.go
@@ -17,14 +17,14 @@ import (
"github.com/go-openapi/swag"
)
-// WireguardInterface Status of a Wireguard interface
+// WireguardInterface Status of a WireGuard interface
//
// +k8s:deepcopy-gen=true
//
// swagger:model WireguardInterface
type WireguardInterface struct {
- // Port on which the Wireguard endpoint is exposed
+ // Port on which the WireGuard endpoint is exposed
ListenPort int64 `json:"listen-port,omitempty"`
// Name of the interface
@@ -33,7 +33,7 @@ type WireguardInterface struct {
// Number of peers configured on this interface
PeerCount int64 `json:"peer-count,omitempty"`
- // Optional list of wireguard peers
+ // Optional list of WireGuard peers
Peers []*WireguardPeer `json:"peers"`
// Public key of this interface
diff --git a/vendor/github.com/cilium/cilium/api/v1/models/wireguard_peer.go b/vendor/github.com/cilium/cilium/api/v1/models/wireguard_peer.go
index f1c7c1235..7d5664e2e 100644
--- a/vendor/github.com/cilium/cilium/api/v1/models/wireguard_peer.go
+++ b/vendor/github.com/cilium/cilium/api/v1/models/wireguard_peer.go
@@ -17,7 +17,7 @@ import (
"github.com/go-openapi/validate"
)
-// WireguardPeer Status of a Wireguard peer
+// WireguardPeer Status of a WireGuard peer
//
// +k8s:deepcopy-gen=true
//
diff --git a/vendor/github.com/cilium/cilium/api/v1/models/wireguard_status.go b/vendor/github.com/cilium/cilium/api/v1/models/wireguard_status.go
index 98285fb5b..041a2d336 100644
--- a/vendor/github.com/cilium/cilium/api/v1/models/wireguard_status.go
+++ b/vendor/github.com/cilium/cilium/api/v1/models/wireguard_status.go
@@ -17,14 +17,14 @@ import (
"github.com/go-openapi/swag"
)
-// WireguardStatus Status of the Wireguard agent
+// WireguardStatus Status of the WireGuard agent
//
// +k8s:deepcopy-gen=true
//
// swagger:model WireguardStatus
type WireguardStatus struct {
- // Wireguard interfaces managed by this Cilium instance
+ // WireGuard interfaces managed by this Cilium instance
Interfaces []*WireguardInterface `json:"interfaces"`
// Node Encryption status
diff --git a/vendor/github.com/cilium/cilium/api/v1/models/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/api/v1/models/zz_generated.deepcopy.go
index ddceb9e6f..03d13fde3 100644
--- a/vendor/github.com/cilium/cilium/api/v1/models/zz_generated.deepcopy.go
+++ b/vendor/github.com/cilium/cilium/api/v1/models/zz_generated.deepcopy.go
@@ -1132,7 +1132,17 @@ func (in *ProxyStatus) DeepCopy() *ProxyStatus {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RemoteCluster) DeepCopyInto(out *RemoteCluster) {
*out = *in
+ if in.Config != nil {
+ in, out := &in.Config, &out.Config
+ *out = new(RemoteClusterConfig)
+ **out = **in
+ }
in.LastFailure.DeepCopyInto(&out.LastFailure)
+ if in.Synced != nil {
+ in, out := &in.Synced, &out.Synced
+ *out = new(RemoteClusterSynced)
+ **out = **in
+ }
return
}
@@ -1146,6 +1156,38 @@ func (in *RemoteCluster) DeepCopy() *RemoteCluster {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RemoteClusterConfig) DeepCopyInto(out *RemoteClusterConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteClusterConfig.
+func (in *RemoteClusterConfig) DeepCopy() *RemoteClusterConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(RemoteClusterConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RemoteClusterSynced) DeepCopyInto(out *RemoteClusterSynced) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteClusterSynced.
+func (in *RemoteClusterSynced) DeepCopy() *RemoteClusterSynced {
+ if in == nil {
+ return nil
+ }
+ out := new(RemoteClusterSynced)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RequestResponseStatistics) DeepCopyInto(out *RequestResponseStatistics) {
*out = *in
@@ -1175,6 +1217,11 @@ func (in *RequestResponseStatistics) DeepCopy() *RequestResponseStatistics {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatusResponse) DeepCopyInto(out *StatusResponse) {
*out = *in
+ if in.AuthCertificateProvider != nil {
+ in, out := &in.AuthCertificateProvider, &out.AuthCertificateProvider
+ *out = new(Status)
+ **out = **in
+ }
if in.BandwidthManager != nil {
in, out := &in.BandwidthManager, &out.BandwidthManager
*out = new(BandwidthManager)
diff --git a/vendor/github.com/cilium/cilium/api/v1/models/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/api/v1/models/zz_generated.deepequal.go
index 3996ca838..130475ae4 100644
--- a/vendor/github.com/cilium/cilium/api/v1/models/zz_generated.deepequal.go
+++ b/vendor/github.com/cilium/cilium/api/v1/models/zz_generated.deepequal.go
@@ -58,6 +58,9 @@ func (in *EndpointIdentifiers) DeepEqual(other *EndpointIdentifiers) bool {
return false
}
+ if in.CniAttachmentID != other.CniAttachmentID {
+ return false
+ }
if in.ContainerID != other.ContainerID {
return false
}
diff --git a/vendor/github.com/cilium/cilium/api/v1/observer/README.md b/vendor/github.com/cilium/cilium/api/v1/observer/README.md
index 2561f1f2a..74d8c687d 100644
--- a/vendor/github.com/cilium/cilium/api/v1/observer/README.md
+++ b/vendor/github.com/cilium/cilium/api/v1/observer/README.md
@@ -145,6 +145,7 @@ GetDebugEventsResponse contains a Cilium datapath debug events.
| since | [google.protobuf.Timestamp](#google-protobuf-Timestamp) | | Since this time for returned flows. Incompatible with `number`. |
| until | [google.protobuf.Timestamp](#google-protobuf-Timestamp) | | Until this time for returned flows. Incompatible with `number`. |
| experimental | [GetFlowsRequest.Experimental](#observer-GetFlowsRequest-Experimental) | | |
+| extensions | [google.protobuf.Any](#google-protobuf-Any) | | extensions can be used to add arbitrary additional metadata to GetFlowsRequest. This can be used to extend functionality for other Hubble compatible APIs, or experiment with new functionality without needing to change the public API. |
@@ -301,6 +302,7 @@ Node represents a cluster node.
| num_unavailable_nodes | [google.protobuf.UInt32Value](#google-protobuf-UInt32Value) | | number of nodes for which a connection cannot be established |
| unavailable_nodes | [string](#string) | repeated | list of nodes that are unavailable This list may not be exhaustive. |
| version | [string](#string) | | Version is the version of Cilium/Hubble. |
+| flows_rate | [double](#double) | | Approximate rate of flows seen by Hubble per second over the last minute. In a multi-node context, this is the sum of all flows rates. |
diff --git a/vendor/github.com/cilium/cilium/api/v1/observer/observer.pb.go b/vendor/github.com/cilium/cilium/api/v1/observer/observer.pb.go
index 7671333e2..32597d059 100644
--- a/vendor/github.com/cilium/cilium/api/v1/observer/observer.pb.go
+++ b/vendor/github.com/cilium/cilium/api/v1/observer/observer.pb.go
@@ -3,8 +3,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.30.0
-// protoc v4.22.3
+// protoc-gen-go v1.31.0
+// protoc v4.24.0
// source: observer/observer.proto
package observer
@@ -14,6 +14,7 @@ import (
relay "github.com/cilium/cilium/api/v1/relay"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
@@ -168,6 +169,9 @@ const DropReason_SNAT_NO_MAP_FOUND = flow.DropReason_SNAT_NO_MAP_FOUND
const DropReason_INVALID_CLUSTER_ID = flow.DropReason_INVALID_CLUSTER_ID
const DropReason_UNSUPPORTED_PROTOCOL_FOR_DSR_ENCAP = flow.DropReason_UNSUPPORTED_PROTOCOL_FOR_DSR_ENCAP
const DropReason_NO_EGRESS_GATEWAY = flow.DropReason_NO_EGRESS_GATEWAY
+const DropReason_UNENCRYPTED_TRAFFIC = flow.DropReason_UNENCRYPTED_TRAFFIC
+const DropReason_TTL_EXCEEDED = flow.DropReason_TTL_EXCEEDED
+const DropReason_NO_NODE_ID = flow.DropReason_NO_NODE_ID
var DropReason_name = flow.DropReason_name
var DropReason_value = flow.DropReason_value
@@ -311,6 +315,8 @@ const DebugEventType_DBG_INHERIT_IDENTITY = flow.DebugEventType_DBG_INHERIT_IDEN
const DebugEventType_DBG_SK_LOOKUP4 = flow.DebugEventType_DBG_SK_LOOKUP4
const DebugEventType_DBG_SK_LOOKUP6 = flow.DebugEventType_DBG_SK_LOOKUP6
const DebugEventType_DBG_SK_ASSIGN = flow.DebugEventType_DBG_SK_ASSIGN
+const DebugEventType_DBG_L7_LB = flow.DebugEventType_DBG_L7_LB
+const DebugEventType_DBG_SKIP_POLICY = flow.DebugEventType_DBG_SKIP_POLICY
var DebugEventType_name = flow.DebugEventType_name
var DebugEventType_value = flow.DebugEventType_value
@@ -338,6 +344,7 @@ type UDP = flow.UDP
type SCTP = flow.SCTP
type ICMPv4 = flow.ICMPv4
type ICMPv6 = flow.ICMPv6
+type Policy = flow.Policy
type EventTypeFilter = flow.EventTypeFilter
type CiliumEventType = flow.CiliumEventType
type FlowFilter = flow.FlowFilter
@@ -436,6 +443,9 @@ type ServerStatusResponse struct {
UnavailableNodes []string `protobuf:"bytes,7,rep,name=unavailable_nodes,json=unavailableNodes,proto3" json:"unavailable_nodes,omitempty"`
// Version is the version of Cilium/Hubble.
Version string `protobuf:"bytes,8,opt,name=version,proto3" json:"version,omitempty"`
+ // Approximate rate of flows seen by Hubble per second over the last minute.
+ // In a multi-node context, this is the sum of all flows rates.
+ FlowsRate float64 `protobuf:"fixed64,9,opt,name=flows_rate,json=flowsRate,proto3" json:"flows_rate,omitempty"`
}
func (x *ServerStatusResponse) Reset() {
@@ -526,6 +536,13 @@ func (x *ServerStatusResponse) GetVersion() string {
return ""
}
+func (x *ServerStatusResponse) GetFlowsRate() float64 {
+ if x != nil {
+ return x.FlowsRate
+ }
+ return 0
+}
+
type GetFlowsRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -560,6 +577,10 @@ type GetFlowsRequest struct {
// Until this time for returned flows. Incompatible with `number`.
Until *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=until,proto3" json:"until,omitempty"`
Experimental *GetFlowsRequest_Experimental `protobuf:"bytes,999,opt,name=experimental,proto3" json:"experimental,omitempty"`
+ // extensions can be used to add arbitrary additional metadata to GetFlowsRequest.
+ // This can be used to extend functionality for other Hubble compatible
+ // APIs, or experiment with new functionality without needing to change the public API.
+ Extensions *anypb.Any `protobuf:"bytes,150000,opt,name=extensions,proto3" json:"extensions,omitempty"`
}
func (x *GetFlowsRequest) Reset() {
@@ -650,6 +671,13 @@ func (x *GetFlowsRequest) GetExperimental() *GetFlowsRequest_Experimental {
return nil
}
+func (x *GetFlowsRequest) GetExtensions() *anypb.Any {
+ if x != nil {
+ return x.Extensions
+ }
+ return nil
+}
+
// GetFlowsResponse contains either a flow or a protocol message.
type GetFlowsResponse struct {
state protoimpl.MessageState
@@ -1692,69 +1720,173 @@ var File_observer_observer_proto protoreflect.FileDescriptor
var file_observer_observer_proto_rawDesc = []byte{
0x0a, 0x17, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x6f, 0x62, 0x73, 0x65, 0x72,
0x76, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x6f, 0x62, 0x73, 0x65, 0x72,
- 0x76, 0x65, 0x72, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0f, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x66, 0x6c, 0x6f,
- 0x77, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x11, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x2f, 0x72,
- 0x65, 0x6c, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x65,
- 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x22, 0xf3, 0x02, 0x0a, 0x14, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74,
- 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x75,
- 0x6d, 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6e,
- 0x75, 0x6d, 0x46, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x66,
- 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x46,
- 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x65, 0x6e, 0x5f, 0x66, 0x6c, 0x6f,
- 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x73, 0x65, 0x65, 0x6e, 0x46, 0x6c,
- 0x6f, 0x77, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6e, 0x73,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x4e, 0x73,
- 0x12, 0x4c, 0x0a, 0x13, 0x6e, 0x75, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65,
- 0x64, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x11, 0x6e, 0x75, 0x6d,
- 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x50,
- 0x0a, 0x15, 0x6e, 0x75, 0x6d, 0x5f, 0x75, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c,
- 0x65, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
+ 0x76, 0x65, 0x72, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f,
+ 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f,
+ 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x0f, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x11, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x2f, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x92, 0x03, 0x0a,
+ 0x14, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x75, 0x6d, 0x5f, 0x66, 0x6c, 0x6f,
+ 0x77, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6e, 0x75, 0x6d, 0x46, 0x6c, 0x6f,
+ 0x77, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x46, 0x6c, 0x6f, 0x77, 0x73, 0x12,
+ 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x65, 0x6e, 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x09, 0x73, 0x65, 0x65, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x1b,
+ 0x0a, 0x09, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x04, 0x52, 0x08, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x4e, 0x73, 0x12, 0x4c, 0x0a, 0x13, 0x6e,
+ 0x75, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x6e, 0x6f, 0x64,
+ 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33,
+ 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x11, 0x6e, 0x75, 0x6d, 0x43, 0x6f, 0x6e, 0x6e, 0x65,
+ 0x63, 0x74, 0x65, 0x64, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x50, 0x0a, 0x15, 0x6e, 0x75, 0x6d,
+ 0x5f, 0x75, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x6f, 0x64,
+ 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33,
+ 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x13, 0x6e, 0x75, 0x6d, 0x55, 0x6e, 0x61, 0x76, 0x61,
+ 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x75,
+ 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73,
+ 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x75, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61,
+ 0x62, 0x6c, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65,
+ 0x18, 0x09, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x61, 0x74,
+ 0x65, 0x22, 0xf1, 0x03, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x14, 0x0a,
+ 0x05, 0x66, 0x69, 0x72, 0x73, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x69,
+ 0x72, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x06, 0x66, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x12, 0x2e, 0x0a, 0x09, 0x62,
+ 0x6c, 0x61, 0x63, 0x6b, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10,
+ 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72,
+ 0x52, 0x09, 0x62, 0x6c, 0x61, 0x63, 0x6b, 0x6c, 0x69, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x09, 0x77,
+ 0x68, 0x69, 0x74, 0x65, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10,
+ 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72,
+ 0x52, 0x09, 0x77, 0x68, 0x69, 0x74, 0x65, 0x6c, 0x69, 0x73, 0x74, 0x12, 0x30, 0x0a, 0x05, 0x73,
+ 0x69, 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
+ 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x12, 0x30, 0x0a,
+ 0x05, 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
+ 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x12,
+ 0x4b, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18,
+ 0xe7, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65,
+ 0x72, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x2e, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x52, 0x0c,
+ 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x36, 0x0a, 0x0a,
+ 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf0, 0x93, 0x09, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73,
+ 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x49, 0x0a, 0x0c, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65,
+ 0x6e, 0x74, 0x61, 0x6c, 0x12, 0x39, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61,
+ 0x73, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
+ 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x4a,
+ 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0x84, 0x02, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x6f,
+ 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x04, 0x66, 0x6c,
+ 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e,
+ 0x46, 0x6c, 0x6f, 0x77, 0x48, 0x00, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x39, 0x0a, 0x0b,
+ 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x16, 0x2e, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x6e, 0x6f, 0x64,
+ 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x32, 0x0a, 0x0b, 0x6c, 0x6f, 0x73, 0x74, 0x5f,
+ 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66,
+ 0x6c, 0x6f, 0x77, 0x2e, 0x4c, 0x6f, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52,
+ 0x0a, 0x6c, 0x6f, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x6e,
+ 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x04, 0x74, 0x69, 0x6d,
+ 0x65, 0x18, 0xe9, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
+ 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x42, 0x10, 0x0a, 0x0e, 0x72, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x22, 0xc1, 0x01, 0x0a,
+ 0x15, 0x47, 0x65, 0x74, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x14,
+ 0x0a, 0x05, 0x66, 0x69, 0x72, 0x73, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66,
+ 0x69, 0x72, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x66, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x12, 0x30, 0x0a, 0x05,
+ 0x73, 0x69, 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x12, 0x30,
+ 0x0a, 0x05, 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x13, 0x6e, 0x75, 0x6d,
- 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x73,
- 0x12, 0x2b, 0x0a, 0x11, 0x75, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x5f,
- 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x75, 0x6e, 0x61,
- 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x18, 0x0a,
- 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
- 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x03, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x46,
- 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6e,
- 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x75, 0x6d,
- 0x62, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x72, 0x73, 0x74, 0x18, 0x09, 0x20, 0x01,
- 0x28, 0x08, 0x52, 0x05, 0x66, 0x69, 0x72, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x6c,
- 0x6c, 0x6f, 0x77, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x66, 0x6f, 0x6c, 0x6c, 0x6f,
- 0x77, 0x12, 0x2e, 0x0a, 0x09, 0x62, 0x6c, 0x61, 0x63, 0x6b, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x05,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x46, 0x6c, 0x6f, 0x77,
- 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x09, 0x62, 0x6c, 0x61, 0x63, 0x6b, 0x6c, 0x69, 0x73,
- 0x74, 0x12, 0x2e, 0x0a, 0x09, 0x77, 0x68, 0x69, 0x74, 0x65, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x06,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x46, 0x6c, 0x6f, 0x77,
- 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x09, 0x77, 0x68, 0x69, 0x74, 0x65, 0x6c, 0x69, 0x73,
- 0x74, 0x12, 0x30, 0x0a, 0x05, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, 0x73, 0x69,
- 0x6e, 0x63, 0x65, 0x12, 0x30, 0x0a, 0x05, 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x18, 0x08, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05,
- 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x12, 0x4b, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d,
- 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0xe7, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6f,
- 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x6f, 0x77, 0x73,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65,
- 0x6e, 0x74, 0x61, 0x6c, 0x52, 0x0c, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74,
- 0x61, 0x6c, 0x1a, 0x49, 0x0a, 0x0c, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74,
- 0x61, 0x6c, 0x12, 0x39, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61,
- 0x73, 0x6b, 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x4a, 0x04, 0x08,
- 0x02, 0x10, 0x03, 0x22, 0x84, 0x02, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x6f, 0x77, 0x73,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77,
+ 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, 0x75, 0x6e, 0x74, 0x69, 0x6c,
+ 0x22, 0x9a, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65,
+ 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x0b, 0x61,
+ 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x10, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65,
+ 0x6e, 0x74, 0x52, 0x0a, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1c,
+ 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0xe8, 0x07, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x04,
+ 0x74, 0x69, 0x6d, 0x65, 0x18, 0xe9, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x22, 0xc1, 0x01,
+ 0x0a, 0x15, 0x47, 0x65, 0x74, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65,
+ 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12,
+ 0x14, 0x0a, 0x05, 0x66, 0x69, 0x72, 0x73, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05,
+ 0x66, 0x69, 0x72, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x66, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x12, 0x30, 0x0a,
+ 0x05, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
+ 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x12,
+ 0x30, 0x0a, 0x05, 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, 0x75, 0x6e, 0x74, 0x69,
+ 0x6c, 0x22, 0x9a, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76,
+ 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x0b,
+ 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x10, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76,
+ 0x65, 0x6e, 0x74, 0x52, 0x0a, 0x64, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12,
+ 0x1c, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0xe8, 0x07, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a,
+ 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 0xe9, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
+ 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x22, 0x11,
+ 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x22, 0x38, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e,
+ 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x22, 0x8d, 0x02, 0x0a, 0x04,
+ 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x26, 0x0a, 0x05,
+ 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x72, 0x65,
+ 0x6c, 0x61, 0x79, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73,
+ 0x74, 0x61, 0x74, 0x65, 0x12, 0x1f, 0x0a, 0x03, 0x74, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x0d, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x54, 0x4c, 0x53,
+ 0x52, 0x03, 0x74, 0x6c, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f,
+ 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65,
+ 0x4e, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x75, 0x6d, 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18,
+ 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6e, 0x75, 0x6d, 0x46, 0x6c, 0x6f, 0x77, 0x73, 0x12,
+ 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x08, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x46, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x1d, 0x0a, 0x0a,
+ 0x73, 0x65, 0x65, 0x6e, 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04,
+ 0x52, 0x09, 0x73, 0x65, 0x65, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x73, 0x22, 0x40, 0x0a, 0x03, 0x54,
+ 0x4c, 0x53, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1f, 0x0a, 0x0b,
+ 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x16, 0x0a,
+ 0x14, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x4c, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33,
+ 0x0a, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x4e, 0x61,
+ 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x73, 0x22, 0x43, 0x0a, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61,
+ 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e,
+ 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0xe9, 0x02, 0x0a, 0x0b, 0x45, 0x78, 0x70,
+ 0x6f, 0x72, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x20, 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77,
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x46, 0x6c,
0x6f, 0x77, 0x48, 0x00, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x39, 0x0a, 0x0b, 0x6e, 0x6f,
0x64, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
@@ -1763,151 +1895,54 @@ var file_observer_observer_proto_rawDesc = []byte{
0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x32, 0x0a, 0x0b, 0x6c, 0x6f, 0x73, 0x74, 0x5f, 0x65, 0x76,
0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x6c, 0x6f,
0x77, 0x2e, 0x4c, 0x6f, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x6c,
- 0x6f, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x6f, 0x64,
- 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6e,
- 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18,
- 0xe9, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
- 0x6d, 0x70, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x42, 0x10, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x22, 0xc1, 0x01, 0x0a, 0x15, 0x47,
- 0x65, 0x74, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05,
- 0x66, 0x69, 0x72, 0x73, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x69, 0x72,
- 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x08, 0x52, 0x06, 0x66, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x12, 0x30, 0x0a, 0x05, 0x73, 0x69,
- 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
- 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x12, 0x30, 0x0a, 0x05,
- 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
- 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x22, 0x9a,
- 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74,
- 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x0b, 0x61, 0x67, 0x65,
- 0x6e, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10,
+ 0x6f, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x33, 0x0a, 0x0b, 0x61, 0x67, 0x65,
+ 0x6e, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10,
0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74,
- 0x52, 0x0a, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09,
- 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x04, 0x74, 0x69,
- 0x6d, 0x65, 0x18, 0xe9, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
- 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x22, 0xc1, 0x01, 0x0a, 0x15,
+ 0x48, 0x00, 0x52, 0x0a, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x33,
+ 0x0a, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67,
+ 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x64, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76,
+ 0x65, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d,
+ 0x65, 0x12, 0x2f, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 0xe9, 0x07, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x74, 0x69,
+ 0x6d, 0x65, 0x42, 0x10, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74,
+ 0x79, 0x70, 0x65, 0x73, 0x32, 0xed, 0x03, 0x0a, 0x08, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65,
+ 0x72, 0x12, 0x45, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x19, 0x2e,
+ 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x6f, 0x77,
+ 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72,
+ 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x57, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x41,
+ 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x1f, 0x2e, 0x6f, 0x62, 0x73,
+ 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76,
+ 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x6f, 0x62,
+ 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45,
+ 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30,
+ 0x01, 0x12, 0x57, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76, 0x65,
+ 0x6e, 0x74, 0x73, 0x12, 0x1f, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x47,
+ 0x65, 0x74, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e,
0x47, 0x65, 0x74, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x14, 0x0a,
- 0x05, 0x66, 0x69, 0x72, 0x73, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x69,
- 0x72, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x08, 0x52, 0x06, 0x66, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x12, 0x30, 0x0a, 0x05, 0x73,
- 0x69, 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
- 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x12, 0x30, 0x0a,
- 0x05, 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
- 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x22,
- 0x9a, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e,
- 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x0b, 0x64, 0x65,
- 0x62, 0x75, 0x67, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x10, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e,
- 0x74, 0x52, 0x0a, 0x64, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1c, 0x0a,
- 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x04, 0x74,
- 0x69, 0x6d, 0x65, 0x18, 0xe9, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
- 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x22, 0x11, 0x0a, 0x0f,
- 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22,
- 0x38, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x4e, 0x6f,
- 0x64, 0x65, 0x52, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x22, 0x8d, 0x02, 0x0a, 0x04, 0x4e, 0x6f,
- 0x64, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
- 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
- 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x26, 0x0a, 0x05, 0x73, 0x74,
- 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x72, 0x65, 0x6c, 0x61,
- 0x79, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61,
- 0x74, 0x65, 0x12, 0x1f, 0x0a, 0x03, 0x74, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x0d, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x54, 0x4c, 0x53, 0x52, 0x03,
- 0x74, 0x6c, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6e, 0x73,
- 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x4e, 0x73,
- 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x75, 0x6d, 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x07, 0x20,
- 0x01, 0x28, 0x04, 0x52, 0x08, 0x6e, 0x75, 0x6d, 0x46, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x1b, 0x0a,
- 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04,
- 0x52, 0x08, 0x6d, 0x61, 0x78, 0x46, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65,
- 0x65, 0x6e, 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09,
- 0x73, 0x65, 0x65, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x73, 0x22, 0x40, 0x0a, 0x03, 0x54, 0x4c, 0x53,
- 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65,
- 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x16, 0x0a, 0x14, 0x47,
- 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x22, 0x4c, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70,
- 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x0a,
- 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x13, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x4e, 0x61, 0x6d, 0x65,
- 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65,
- 0x73, 0x22, 0x43, 0x0a, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x18,
- 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65,
- 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d,
- 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0xe9, 0x02, 0x0a, 0x0b, 0x45, 0x78, 0x70, 0x6f, 0x72,
- 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x20, 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x46, 0x6c, 0x6f, 0x77,
- 0x48, 0x00, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x39, 0x0a, 0x0b, 0x6e, 0x6f, 0x64, 0x65,
- 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e,
- 0x72, 0x65, 0x6c, 0x61, 0x79, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x6e, 0x6f, 0x64, 0x65, 0x53, 0x74, 0x61,
- 0x74, 0x75, 0x73, 0x12, 0x32, 0x0a, 0x0b, 0x6c, 0x6f, 0x73, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e,
- 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e,
- 0x4c, 0x6f, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x6c, 0x6f, 0x73,
- 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x33, 0x0a, 0x0b, 0x61, 0x67, 0x65, 0x6e, 0x74,
- 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x66,
- 0x6c, 0x6f, 0x77, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x00,
- 0x52, 0x0a, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x33, 0x0a, 0x0b,
- 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x10, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76,
- 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x64, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e,
- 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0xe8,
- 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12,
- 0x2f, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 0xe9, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65,
- 0x42, 0x10, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70,
- 0x65, 0x73, 0x32, 0xed, 0x03, 0x0a, 0x08, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12,
- 0x45, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x19, 0x2e, 0x6f, 0x62,
- 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x6f, 0x77, 0x73, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65,
- 0x72, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x57, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x41, 0x67, 0x65,
- 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x1f, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72,
- 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e,
- 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x6f, 0x62, 0x73, 0x65,
- 0x72, 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65,
- 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12,
- 0x57, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74,
- 0x73, 0x12, 0x1f, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74,
- 0x44, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65,
- 0x74, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x43, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x4e,
- 0x6f, 0x64, 0x65, 0x73, 0x12, 0x19, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e,
- 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x1a, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f,
- 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a,
- 0x0d, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x1e,
- 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x6d,
- 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f,
- 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x6d,
- 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
- 0x00, 0x12, 0x4f, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75,
- 0x73, 0x12, 0x1d, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x72,
- 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x1e, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x72, 0x76,
- 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x22, 0x00, 0x42, 0x2a, 0x5a, 0x28, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
- 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61,
- 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x03,
- 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x43, 0x0a, 0x08, 0x47, 0x65,
+ 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x19, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65,
+ 0x72, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x1a, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74,
+ 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12,
+ 0x52, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73,
+ 0x12, 0x1e, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x4e,
+ 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x1f, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x4e,
+ 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61,
+ 0x74, 0x75, 0x73, 0x12, 0x1d, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x53,
+ 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x53, 0x65,
+ 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x22, 0x00, 0x42, 0x2a, 0x5a, 0x28, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
+ 0x6f, 0x6d, 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d,
+ 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72,
+ 0x50, 0x04, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -1944,13 +1979,14 @@ var file_observer_observer_proto_goTypes = []interface{}{
(*wrapperspb.UInt32Value)(nil), // 17: google.protobuf.UInt32Value
(*flow.FlowFilter)(nil), // 18: flow.FlowFilter
(*timestamppb.Timestamp)(nil), // 19: google.protobuf.Timestamp
- (*flow.Flow)(nil), // 20: flow.Flow
- (*relay.NodeStatusEvent)(nil), // 21: relay.NodeStatusEvent
- (*flow.LostEvent)(nil), // 22: flow.LostEvent
- (*flow.AgentEvent)(nil), // 23: flow.AgentEvent
- (*flow.DebugEvent)(nil), // 24: flow.DebugEvent
- (relay.NodeState)(0), // 25: relay.NodeState
- (*fieldmaskpb.FieldMask)(nil), // 26: google.protobuf.FieldMask
+ (*anypb.Any)(nil), // 20: google.protobuf.Any
+ (*flow.Flow)(nil), // 21: flow.Flow
+ (*relay.NodeStatusEvent)(nil), // 22: relay.NodeStatusEvent
+ (*flow.LostEvent)(nil), // 23: flow.LostEvent
+ (*flow.AgentEvent)(nil), // 24: flow.AgentEvent
+ (*flow.DebugEvent)(nil), // 25: flow.DebugEvent
+ (relay.NodeState)(0), // 26: relay.NodeState
+ (*fieldmaskpb.FieldMask)(nil), // 27: google.protobuf.FieldMask
}
var file_observer_observer_proto_depIdxs = []int32{
17, // 0: observer.ServerStatusResponse.num_connected_nodes:type_name -> google.protobuf.UInt32Value
@@ -1960,46 +1996,47 @@ var file_observer_observer_proto_depIdxs = []int32{
19, // 4: observer.GetFlowsRequest.since:type_name -> google.protobuf.Timestamp
19, // 5: observer.GetFlowsRequest.until:type_name -> google.protobuf.Timestamp
16, // 6: observer.GetFlowsRequest.experimental:type_name -> observer.GetFlowsRequest.Experimental
- 20, // 7: observer.GetFlowsResponse.flow:type_name -> flow.Flow
- 21, // 8: observer.GetFlowsResponse.node_status:type_name -> relay.NodeStatusEvent
- 22, // 9: observer.GetFlowsResponse.lost_events:type_name -> flow.LostEvent
- 19, // 10: observer.GetFlowsResponse.time:type_name -> google.protobuf.Timestamp
- 19, // 11: observer.GetAgentEventsRequest.since:type_name -> google.protobuf.Timestamp
- 19, // 12: observer.GetAgentEventsRequest.until:type_name -> google.protobuf.Timestamp
- 23, // 13: observer.GetAgentEventsResponse.agent_event:type_name -> flow.AgentEvent
- 19, // 14: observer.GetAgentEventsResponse.time:type_name -> google.protobuf.Timestamp
- 19, // 15: observer.GetDebugEventsRequest.since:type_name -> google.protobuf.Timestamp
- 19, // 16: observer.GetDebugEventsRequest.until:type_name -> google.protobuf.Timestamp
- 24, // 17: observer.GetDebugEventsResponse.debug_event:type_name -> flow.DebugEvent
- 19, // 18: observer.GetDebugEventsResponse.time:type_name -> google.protobuf.Timestamp
- 10, // 19: observer.GetNodesResponse.nodes:type_name -> observer.Node
- 25, // 20: observer.Node.state:type_name -> relay.NodeState
- 11, // 21: observer.Node.tls:type_name -> observer.TLS
- 14, // 22: observer.GetNamespacesResponse.namespaces:type_name -> observer.Namespace
- 20, // 23: observer.ExportEvent.flow:type_name -> flow.Flow
- 21, // 24: observer.ExportEvent.node_status:type_name -> relay.NodeStatusEvent
- 22, // 25: observer.ExportEvent.lost_events:type_name -> flow.LostEvent
- 23, // 26: observer.ExportEvent.agent_event:type_name -> flow.AgentEvent
- 24, // 27: observer.ExportEvent.debug_event:type_name -> flow.DebugEvent
- 19, // 28: observer.ExportEvent.time:type_name -> google.protobuf.Timestamp
- 26, // 29: observer.GetFlowsRequest.Experimental.field_mask:type_name -> google.protobuf.FieldMask
- 2, // 30: observer.Observer.GetFlows:input_type -> observer.GetFlowsRequest
- 4, // 31: observer.Observer.GetAgentEvents:input_type -> observer.GetAgentEventsRequest
- 6, // 32: observer.Observer.GetDebugEvents:input_type -> observer.GetDebugEventsRequest
- 8, // 33: observer.Observer.GetNodes:input_type -> observer.GetNodesRequest
- 12, // 34: observer.Observer.GetNamespaces:input_type -> observer.GetNamespacesRequest
- 0, // 35: observer.Observer.ServerStatus:input_type -> observer.ServerStatusRequest
- 3, // 36: observer.Observer.GetFlows:output_type -> observer.GetFlowsResponse
- 5, // 37: observer.Observer.GetAgentEvents:output_type -> observer.GetAgentEventsResponse
- 7, // 38: observer.Observer.GetDebugEvents:output_type -> observer.GetDebugEventsResponse
- 9, // 39: observer.Observer.GetNodes:output_type -> observer.GetNodesResponse
- 13, // 40: observer.Observer.GetNamespaces:output_type -> observer.GetNamespacesResponse
- 1, // 41: observer.Observer.ServerStatus:output_type -> observer.ServerStatusResponse
- 36, // [36:42] is the sub-list for method output_type
- 30, // [30:36] is the sub-list for method input_type
- 30, // [30:30] is the sub-list for extension type_name
- 30, // [30:30] is the sub-list for extension extendee
- 0, // [0:30] is the sub-list for field type_name
+ 20, // 7: observer.GetFlowsRequest.extensions:type_name -> google.protobuf.Any
+ 21, // 8: observer.GetFlowsResponse.flow:type_name -> flow.Flow
+ 22, // 9: observer.GetFlowsResponse.node_status:type_name -> relay.NodeStatusEvent
+ 23, // 10: observer.GetFlowsResponse.lost_events:type_name -> flow.LostEvent
+ 19, // 11: observer.GetFlowsResponse.time:type_name -> google.protobuf.Timestamp
+ 19, // 12: observer.GetAgentEventsRequest.since:type_name -> google.protobuf.Timestamp
+ 19, // 13: observer.GetAgentEventsRequest.until:type_name -> google.protobuf.Timestamp
+ 24, // 14: observer.GetAgentEventsResponse.agent_event:type_name -> flow.AgentEvent
+ 19, // 15: observer.GetAgentEventsResponse.time:type_name -> google.protobuf.Timestamp
+ 19, // 16: observer.GetDebugEventsRequest.since:type_name -> google.protobuf.Timestamp
+ 19, // 17: observer.GetDebugEventsRequest.until:type_name -> google.protobuf.Timestamp
+ 25, // 18: observer.GetDebugEventsResponse.debug_event:type_name -> flow.DebugEvent
+ 19, // 19: observer.GetDebugEventsResponse.time:type_name -> google.protobuf.Timestamp
+ 10, // 20: observer.GetNodesResponse.nodes:type_name -> observer.Node
+ 26, // 21: observer.Node.state:type_name -> relay.NodeState
+ 11, // 22: observer.Node.tls:type_name -> observer.TLS
+ 14, // 23: observer.GetNamespacesResponse.namespaces:type_name -> observer.Namespace
+ 21, // 24: observer.ExportEvent.flow:type_name -> flow.Flow
+ 22, // 25: observer.ExportEvent.node_status:type_name -> relay.NodeStatusEvent
+ 23, // 26: observer.ExportEvent.lost_events:type_name -> flow.LostEvent
+ 24, // 27: observer.ExportEvent.agent_event:type_name -> flow.AgentEvent
+ 25, // 28: observer.ExportEvent.debug_event:type_name -> flow.DebugEvent
+ 19, // 29: observer.ExportEvent.time:type_name -> google.protobuf.Timestamp
+ 27, // 30: observer.GetFlowsRequest.Experimental.field_mask:type_name -> google.protobuf.FieldMask
+ 2, // 31: observer.Observer.GetFlows:input_type -> observer.GetFlowsRequest
+ 4, // 32: observer.Observer.GetAgentEvents:input_type -> observer.GetAgentEventsRequest
+ 6, // 33: observer.Observer.GetDebugEvents:input_type -> observer.GetDebugEventsRequest
+ 8, // 34: observer.Observer.GetNodes:input_type -> observer.GetNodesRequest
+ 12, // 35: observer.Observer.GetNamespaces:input_type -> observer.GetNamespacesRequest
+ 0, // 36: observer.Observer.ServerStatus:input_type -> observer.ServerStatusRequest
+ 3, // 37: observer.Observer.GetFlows:output_type -> observer.GetFlowsResponse
+ 5, // 38: observer.Observer.GetAgentEvents:output_type -> observer.GetAgentEventsResponse
+ 7, // 39: observer.Observer.GetDebugEvents:output_type -> observer.GetDebugEventsResponse
+ 9, // 40: observer.Observer.GetNodes:output_type -> observer.GetNodesResponse
+ 13, // 41: observer.Observer.GetNamespaces:output_type -> observer.GetNamespacesResponse
+ 1, // 42: observer.Observer.ServerStatus:output_type -> observer.ServerStatusResponse
+ 37, // [37:43] is the sub-list for method output_type
+ 31, // [31:37] is the sub-list for method input_type
+ 31, // [31:31] is the sub-list for extension type_name
+ 31, // [31:31] is the sub-list for extension extendee
+ 0, // [0:31] is the sub-list for field type_name
}
func init() { file_observer_observer_proto_init() }
diff --git a/vendor/github.com/cilium/cilium/api/v1/observer/observer.proto b/vendor/github.com/cilium/cilium/api/v1/observer/observer.proto
index f6844e7bf..6ef15a8d9 100644
--- a/vendor/github.com/cilium/cilium/api/v1/observer/observer.proto
+++ b/vendor/github.com/cilium/cilium/api/v1/observer/observer.proto
@@ -3,6 +3,7 @@
syntax = "proto3";
+import "google/protobuf/any.proto";
import "google/protobuf/wrappers.proto";
import "google/protobuf/timestamp.proto";
import "google/protobuf/field_mask.proto";
@@ -73,6 +74,10 @@ message ServerStatusResponse {
// Version is the version of Cilium/Hubble.
string version = 8;
+
+ // Approximate rate of flows seen by Hubble per second over the last minute.
+ // In a multi-node context, this is the sum of all flows rates.
+ double flows_rate = 9;
}
message GetFlowsRequest {
@@ -122,6 +127,11 @@ message GetFlowsRequest {
google.protobuf.FieldMask field_mask = 1;
}
Experimental experimental = 999;
+
+ // extensions can be used to add arbitrary additional metadata to GetFlowsRequest.
+ // This can be used to extend functionality for other Hubble compatible
+ // APIs, or experiment with new functionality without needing to change the public API.
+ google.protobuf.Any extensions = 150000;
}
// GetFlowsResponse contains either a flow or a protocol message.
diff --git a/vendor/github.com/cilium/cilium/api/v1/observer/observer_grpc.pb.go b/vendor/github.com/cilium/cilium/api/v1/observer/observer_grpc.pb.go
index 198b72937..fc67208cb 100644
--- a/vendor/github.com/cilium/cilium/api/v1/observer/observer_grpc.pb.go
+++ b/vendor/github.com/cilium/cilium/api/v1/observer/observer_grpc.pb.go
@@ -4,7 +4,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
-// - protoc v4.22.3
+// - protoc v4.24.0
// source: observer/observer.proto
package observer
diff --git a/vendor/github.com/cilium/cilium/api/v1/peer/peer.pb.go b/vendor/github.com/cilium/cilium/api/v1/peer/peer.pb.go
index a85ed2b9a..4a2712ba8 100644
--- a/vendor/github.com/cilium/cilium/api/v1/peer/peer.pb.go
+++ b/vendor/github.com/cilium/cilium/api/v1/peer/peer.pb.go
@@ -3,8 +3,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.30.0
-// protoc v4.22.3
+// protoc-gen-go v1.31.0
+// protoc v4.24.0
// source: peer/peer.proto
package peer
diff --git a/vendor/github.com/cilium/cilium/api/v1/peer/peer_grpc.pb.go b/vendor/github.com/cilium/cilium/api/v1/peer/peer_grpc.pb.go
index 4307d6c72..03032c2b4 100644
--- a/vendor/github.com/cilium/cilium/api/v1/peer/peer_grpc.pb.go
+++ b/vendor/github.com/cilium/cilium/api/v1/peer/peer_grpc.pb.go
@@ -4,7 +4,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
-// - protoc v4.22.3
+// - protoc v4.24.0
// source: peer/peer.proto
package peer
diff --git a/vendor/github.com/cilium/cilium/api/v1/recorder/recorder.pb.go b/vendor/github.com/cilium/cilium/api/v1/recorder/recorder.pb.go
index 14ef43820..93e1259d3 100644
--- a/vendor/github.com/cilium/cilium/api/v1/recorder/recorder.pb.go
+++ b/vendor/github.com/cilium/cilium/api/v1/recorder/recorder.pb.go
@@ -3,8 +3,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.30.0
-// protoc v4.22.3
+// protoc-gen-go v1.31.0
+// protoc v4.24.0
// source: recorder/recorder.proto
package recorder
diff --git a/vendor/github.com/cilium/cilium/api/v1/recorder/recorder_grpc.pb.go b/vendor/github.com/cilium/cilium/api/v1/recorder/recorder_grpc.pb.go
index 1e026d533..508364e87 100644
--- a/vendor/github.com/cilium/cilium/api/v1/recorder/recorder_grpc.pb.go
+++ b/vendor/github.com/cilium/cilium/api/v1/recorder/recorder_grpc.pb.go
@@ -4,7 +4,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
-// - protoc v4.22.3
+// - protoc v4.24.0
// source: recorder/recorder.proto
package recorder
diff --git a/vendor/github.com/cilium/cilium/api/v1/relay/relay.pb.go b/vendor/github.com/cilium/cilium/api/v1/relay/relay.pb.go
index 1f40c6c30..a4ae994cc 100644
--- a/vendor/github.com/cilium/cilium/api/v1/relay/relay.pb.go
+++ b/vendor/github.com/cilium/cilium/api/v1/relay/relay.pb.go
@@ -3,8 +3,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.30.0
-// protoc v4.22.3
+// protoc-gen-go v1.31.0
+// protoc v4.24.0
// source: relay/relay.proto
package relay
diff --git a/vendor/github.com/cilium/cilium/pkg/alibabacloud/eni/types/doc.go b/vendor/github.com/cilium/cilium/pkg/alibabacloud/eni/types/doc.go
new file mode 100644
index 000000000..a4d5ab3b9
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/alibabacloud/eni/types/doc.go
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// +k8s:deepcopy-gen=package
+// +deepequal-gen=package
+
+// Package types provides Alibaba Cloud specific types
+package types
diff --git a/vendor/github.com/cilium/cilium/pkg/alibabacloud/eni/types/types.go b/vendor/github.com/cilium/cilium/pkg/alibabacloud/eni/types/types.go
new file mode 100644
index 000000000..19daaaaec
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/alibabacloud/eni/types/types.go
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package types
+
+import (
+ "github.com/cilium/cilium/pkg/ipam/types"
+)
+
+// Spec is the ENI specification of a node. This specification is considered
+// by the cilium-operator to act as an IPAM operator and makes ENI IPs available
+// via the IPAMSpec section.
+//
+// The ENI specification can either be provided explicitly by the user or the
+// cilium-agent running on the node can be instructed to create the CiliumNode
+// custom resource along with an ENI specification when the node registers
+// itself to the Kubernetes cluster.
+type Spec struct {
+ // InstanceType is the ECS instance type, e.g. "ecs.g6.2xlarge"
+ //
+ // +kubebuilder:validation:Optional
+ InstanceType string `json:"instance-type,omitempty"`
+
+ // AvailabilityZone is the availability zone to use when allocating
+ // ENIs.
+ //
+ // +kubebuilder:validation:Optional
+ AvailabilityZone string `json:"availability-zone,omitempty"`
+
+ // VPCID is the VPC ID to use when allocating ENIs.
+ //
+ // +kubebuilder:validation:Optional
+ VPCID string `json:"vpc-id,omitempty"`
+
+ // CIDRBlock is vpc ipv4 CIDR
+ //
+ // +kubebuilder:validation:Optional
+ CIDRBlock string `json:"cidr-block,omitempty"`
+
+ // VSwitches is the ID of vSwitch available for ENI
+ //
+ // +kubebuilder:validation:Optional
+ VSwitches []string `json:"vswitches,omitempty"`
+
+ // VSwitchTags is the list of tags to use when evaluating which
+ // vSwitch to use for the ENI.
+ //
+ // +kubebuilder:validation:Optional
+ VSwitchTags map[string]string `json:"vswitch-tags,omitempty"`
+
+ // SecurityGroups is the list of security groups to attach to any ENI
+ // that is created and attached to the instance.
+ //
+ // +kubebuilder:validation:Optional
+ SecurityGroups []string `json:"security-groups,omitempty"`
+
+ // SecurityGroupTags is the list of tags to use when evaluating which
+ // security groups to use for the ENI.
+ //
+ // +kubebuilder:validation:Optional
+ SecurityGroupTags map[string]string `json:"security-group-tags,omitempty"`
+}
+
+const (
+ // ENITypePrimary is the type for ENI
+ ENITypePrimary string = "Primary"
+ // ENITypeSecondary is the type for ENI
+ ENITypeSecondary string = "Secondary"
+)
+
+// ENI represents an AlibabaCloud Elastic Network Interface
+type ENI struct {
+ // NetworkInterfaceID is the ENI id
+ //
+ // +optional
+ NetworkInterfaceID string `json:"network-interface-id,omitempty"`
+
+ // MACAddress is the mac address of the ENI
+ //
+ // +optional
+ MACAddress string `json:"mac-address,omitempty"`
+
+ // Type is the ENI type Primary or Secondary
+ //
+ // +optional
+ Type string `json:"type,omitempty"`
+
+ // InstanceID is the InstanceID using this ENI
+ //
+ // +optional
+ InstanceID string `json:"instance-id,omitempty"`
+
+ // SecurityGroupIDs is the security group ids used by this ENI
+ //
+ // +optional
+ SecurityGroupIDs []string `json:"security-groupids,omitempty"`
+
+ // VPC is the vpc to which the ENI belongs
+ //
+ // +optional
+ VPC VPC `json:"vpc,omitempty"`
+
+ // ZoneID is the zone to which the ENI belongs
+ //
+ // +optional
+ ZoneID string `json:"zone-id,omitempty"`
+
+ // VSwitch is the vSwitch the ENI is using
+ //
+ // +optional
+ VSwitch VSwitch `json:"vswitch,omitempty"`
+
+ // PrimaryIPAddress is the primary IP on ENI
+ //
+ // +optional
+ PrimaryIPAddress string `json:"primary-ip-address,omitempty"`
+
+ // PrivateIPSets is the list of all IPs on the ENI, including PrimaryIPAddress
+ //
+ // +optional
+ PrivateIPSets []PrivateIPSet `json:"private-ipsets,omitempty"`
+
+ // Tags is the tags on this ENI
+ //
+ // +optional
+ Tags map[string]string `json:"tags,omitempty"`
+}
+
+// InterfaceID returns the identifier of the interface
+func (e *ENI) InterfaceID() string {
+ return e.NetworkInterfaceID
+}
+
+// ForeachAddress iterates over all addresses and calls fn
+func (e *ENI) ForeachAddress(id string, fn types.AddressIterator) error {
+ for _, address := range e.PrivateIPSets {
+ if address.Primary {
+ continue
+ }
+ if err := fn(id, e.NetworkInterfaceID, address.PrivateIpAddress, "", address); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ENIStatus is the status of ENI addressing of the node
+type ENIStatus struct {
+ // ENIs is the list of ENIs on the node
+ //
+ // +optional
+ ENIs map[string]ENI `json:"enis,omitempty"`
+}
+
+// PrivateIPSet is a nested struct in ecs response
+type PrivateIPSet struct {
+ PrivateIpAddress string `json:"private-ip-address,omitempty"`
+ Primary bool `json:"primary,omitempty" `
+}
+
+type VPC struct {
+ // VPCID is the vpc to which the ENI belongs
+ //
+ // +optional
+ VPCID string `json:"vpc-id,omitempty"`
+
+ // CIDRBlock is the VPC IPv4 CIDR
+ //
+ // +optional
+ CIDRBlock string `json:"cidr,omitempty"`
+
+ // IPv6CIDRBlock is the VPC IPv6 CIDR
+ //
+ // +optional
+ IPv6CIDRBlock string `json:"ipv6-cidr,omitempty"`
+
+ // SecondaryCIDRs is the list of Secondary CIDRs associated with the VPC
+ //
+ // +optional
+ SecondaryCIDRs []string `json:"secondary-cidrs,omitempty"`
+}
+
+type VSwitch struct {
+ // VSwitchID is the vSwitch to which the ENI belongs
+ //
+ // +optional
+ VSwitchID string `json:"vswitch-id,omitempty"`
+
+ // CIDRBlock is the vSwitch IPv4 CIDR
+ //
+ // +optional
+ CIDRBlock string `json:"cidr,omitempty"`
+
+ // IPv6CIDRBlock is the vSwitch IPv6 CIDR
+ //
+ // +optional
+ IPv6CIDRBlock string `json:"ipv6-cidr,omitempty"`
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/alibabacloud/eni/types/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/alibabacloud/eni/types/zz_generated.deepcopy.go
new file mode 100644
index 000000000..309101578
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/alibabacloud/eni/types/zz_generated.deepcopy.go
@@ -0,0 +1,160 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package types
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ENI) DeepCopyInto(out *ENI) {
+ *out = *in
+ if in.SecurityGroupIDs != nil {
+ in, out := &in.SecurityGroupIDs, &out.SecurityGroupIDs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ in.VPC.DeepCopyInto(&out.VPC)
+ out.VSwitch = in.VSwitch
+ if in.PrivateIPSets != nil {
+ in, out := &in.PrivateIPSets, &out.PrivateIPSets
+ *out = make([]PrivateIPSet, len(*in))
+ copy(*out, *in)
+ }
+ if in.Tags != nil {
+ in, out := &in.Tags, &out.Tags
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ENI.
+func (in *ENI) DeepCopy() *ENI {
+ if in == nil {
+ return nil
+ }
+ out := new(ENI)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ENIStatus) DeepCopyInto(out *ENIStatus) {
+ *out = *in
+ if in.ENIs != nil {
+ in, out := &in.ENIs, &out.ENIs
+ *out = make(map[string]ENI, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ENIStatus.
+func (in *ENIStatus) DeepCopy() *ENIStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ENIStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PrivateIPSet) DeepCopyInto(out *PrivateIPSet) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateIPSet.
+func (in *PrivateIPSet) DeepCopy() *PrivateIPSet {
+ if in == nil {
+ return nil
+ }
+ out := new(PrivateIPSet)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Spec) DeepCopyInto(out *Spec) {
+ *out = *in
+ if in.VSwitches != nil {
+ in, out := &in.VSwitches, &out.VSwitches
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.VSwitchTags != nil {
+ in, out := &in.VSwitchTags, &out.VSwitchTags
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.SecurityGroups != nil {
+ in, out := &in.SecurityGroups, &out.SecurityGroups
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.SecurityGroupTags != nil {
+ in, out := &in.SecurityGroupTags, &out.SecurityGroupTags
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Spec.
+func (in *Spec) DeepCopy() *Spec {
+ if in == nil {
+ return nil
+ }
+ out := new(Spec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VPC) DeepCopyInto(out *VPC) {
+ *out = *in
+ if in.SecondaryCIDRs != nil {
+ in, out := &in.SecondaryCIDRs, &out.SecondaryCIDRs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPC.
+func (in *VPC) DeepCopy() *VPC {
+ if in == nil {
+ return nil
+ }
+ out := new(VPC)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VSwitch) DeepCopyInto(out *VSwitch) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSwitch.
+func (in *VSwitch) DeepCopy() *VSwitch {
+ if in == nil {
+ return nil
+ }
+ out := new(VSwitch)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/alibabacloud/eni/types/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/alibabacloud/eni/types/zz_generated.deepequal.go
new file mode 100644
index 000000000..55fea351a
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/alibabacloud/eni/types/zz_generated.deepequal.go
@@ -0,0 +1,302 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by deepequal-gen. DO NOT EDIT.
+
+package types
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *ENI) DeepEqual(other *ENI) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.NetworkInterfaceID != other.NetworkInterfaceID {
+ return false
+ }
+ if in.MACAddress != other.MACAddress {
+ return false
+ }
+ if in.Type != other.Type {
+ return false
+ }
+ if in.InstanceID != other.InstanceID {
+ return false
+ }
+ if ((in.SecurityGroupIDs != nil) && (other.SecurityGroupIDs != nil)) || ((in.SecurityGroupIDs == nil) != (other.SecurityGroupIDs == nil)) {
+ in, other := &in.SecurityGroupIDs, &other.SecurityGroupIDs
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if inElement != (*other)[i] {
+ return false
+ }
+ }
+ }
+ }
+
+ if !in.VPC.DeepEqual(&other.VPC) {
+ return false
+ }
+
+ if in.ZoneID != other.ZoneID {
+ return false
+ }
+ if in.VSwitch != other.VSwitch {
+ return false
+ }
+
+ if in.PrimaryIPAddress != other.PrimaryIPAddress {
+ return false
+ }
+ if ((in.PrivateIPSets != nil) && (other.PrivateIPSets != nil)) || ((in.PrivateIPSets == nil) != (other.PrivateIPSets == nil)) {
+ in, other := &in.PrivateIPSets, &other.PrivateIPSets
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ if ((in.Tags != nil) && (other.Tags != nil)) || ((in.Tags == nil) != (other.Tags == nil)) {
+ in, other := &in.Tags, &other.Tags
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for key, inValue := range *in {
+ if otherValue, present := (*other)[key]; !present {
+ return false
+ } else {
+ if inValue != otherValue {
+ return false
+ }
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *ENIStatus) DeepEqual(other *ENIStatus) bool {
+ if other == nil {
+ return false
+ }
+
+ if ((in.ENIs != nil) && (other.ENIs != nil)) || ((in.ENIs == nil) != (other.ENIs == nil)) {
+ in, other := &in.ENIs, &other.ENIs
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for key, inValue := range *in {
+ if otherValue, present := (*other)[key]; !present {
+ return false
+ } else {
+ if !inValue.DeepEqual(&otherValue) {
+ return false
+ }
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *PrivateIPSet) DeepEqual(other *PrivateIPSet) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.PrivateIpAddress != other.PrivateIpAddress {
+ return false
+ }
+ if in.Primary != other.Primary {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *Spec) DeepEqual(other *Spec) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.InstanceType != other.InstanceType {
+ return false
+ }
+ if in.AvailabilityZone != other.AvailabilityZone {
+ return false
+ }
+ if in.VPCID != other.VPCID {
+ return false
+ }
+ if in.CIDRBlock != other.CIDRBlock {
+ return false
+ }
+ if ((in.VSwitches != nil) && (other.VSwitches != nil)) || ((in.VSwitches == nil) != (other.VSwitches == nil)) {
+ in, other := &in.VSwitches, &other.VSwitches
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if inElement != (*other)[i] {
+ return false
+ }
+ }
+ }
+ }
+
+ if ((in.VSwitchTags != nil) && (other.VSwitchTags != nil)) || ((in.VSwitchTags == nil) != (other.VSwitchTags == nil)) {
+ in, other := &in.VSwitchTags, &other.VSwitchTags
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for key, inValue := range *in {
+ if otherValue, present := (*other)[key]; !present {
+ return false
+ } else {
+ if inValue != otherValue {
+ return false
+ }
+ }
+ }
+ }
+ }
+
+ if ((in.SecurityGroups != nil) && (other.SecurityGroups != nil)) || ((in.SecurityGroups == nil) != (other.SecurityGroups == nil)) {
+ in, other := &in.SecurityGroups, &other.SecurityGroups
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if inElement != (*other)[i] {
+ return false
+ }
+ }
+ }
+ }
+
+ if ((in.SecurityGroupTags != nil) && (other.SecurityGroupTags != nil)) || ((in.SecurityGroupTags == nil) != (other.SecurityGroupTags == nil)) {
+ in, other := &in.SecurityGroupTags, &other.SecurityGroupTags
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for key, inValue := range *in {
+ if otherValue, present := (*other)[key]; !present {
+ return false
+ } else {
+ if inValue != otherValue {
+ return false
+ }
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *VPC) DeepEqual(other *VPC) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.VPCID != other.VPCID {
+ return false
+ }
+ if in.CIDRBlock != other.CIDRBlock {
+ return false
+ }
+ if in.IPv6CIDRBlock != other.IPv6CIDRBlock {
+ return false
+ }
+ if ((in.SecondaryCIDRs != nil) && (other.SecondaryCIDRs != nil)) || ((in.SecondaryCIDRs == nil) != (other.SecondaryCIDRs == nil)) {
+ in, other := &in.SecondaryCIDRs, &other.SecondaryCIDRs
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if inElement != (*other)[i] {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *VSwitch) DeepEqual(other *VSwitch) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.VSwitchID != other.VSwitchID {
+ return false
+ }
+ if in.CIDRBlock != other.CIDRBlock {
+ return false
+ }
+ if in.IPv6CIDRBlock != other.IPv6CIDRBlock {
+ return false
+ }
+
+ return true
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/allocator/allocator.go b/vendor/github.com/cilium/cilium/pkg/allocator/allocator.go
new file mode 100644
index 000000000..19091fecd
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/allocator/allocator.go
@@ -0,0 +1,1051 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package allocator
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/google/uuid"
+ "github.com/sirupsen/logrus"
+
+ "github.com/cilium/cilium/pkg/backoff"
+ "github.com/cilium/cilium/pkg/idpool"
+ "github.com/cilium/cilium/pkg/inctimer"
+ "github.com/cilium/cilium/pkg/kvstore"
+ "github.com/cilium/cilium/pkg/lock"
+ "github.com/cilium/cilium/pkg/logging"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+ "github.com/cilium/cilium/pkg/option"
+ "github.com/cilium/cilium/pkg/rate"
+)
+
+var (
+ log = logging.DefaultLogger.WithField(logfields.LogSubsys, "allocator")
+)
+
+const (
+ // maxAllocAttempts is the number of attempted allocation requests
+ // performed before failing.
+ maxAllocAttempts = 16
+)
+
+// Allocator is a distributed ID allocator backed by a KVstore. It maps
+// arbitrary keys to identifiers. Multiple users on different cluster nodes can
+// in parallel request the ID for keys and are guaranteed to retrieve the same
+// ID for an identical key.
+//
+// While the details of how keys are stored is delegated to Backend
+// implementations, some expectations exist. See pkg/kvstore/allocator for
+// details about the kvstore implementation.
+//
+// A node takes a reference to an identity when it is in-use on that node, and
+// the identity remains in-use if there is any node reference to it. When an
+// identity no longer has any node references, it may be garbage collected. No
+// guarantees are made at that point and the numeric identity may be reused.
+// Note that the numeric IDs are selected locally and verified with the Backend.
+//
+// Lookup ID by key:
+// 1. Return ID from local cache updated by watcher (no Backend interactions)
+// 2. Do ListPrefix() on slave key excluding node suffix, return the first
+// result that matches the exact prefix.
+//
+// Lookup key by ID:
+// 1. Return key from local cache updated by watcher (no Backend interactions)
+// 2. Do Get() on master key, return result
+//
+// Allocate:
+// 1. Check local key cache, increment, and return if key is already in use
+// locally (no Backend interactions)
+// 2. Check local cache updated by watcher, if...
+//
+// ... match found:
+//
+// 2.1 Create a new slave key. This operation is potentially racy as the master
+// key can be removed in the meantime.
+// - etcd: Create is made conditional on existence of master key
+// - consul: locking
+//
+// ... match not found:
+//
+// 2.1 Select new unused id from local cache
+// 2.2 Create a new master key with the condition that it may not exist
+// 2.3 Create a new slave key
+//
+// 1.1. If found, increment and return (no Backend interactions)
+// 2. Lookup ID by key in local cache or via first slave key found in Backend
+//
+// Release:
+// 1. Reduce local reference count until last use (no Backend interactions)
+// 2. Delete slave key (basePath/value/key1/node1)
+// This automatically guarantees that when the last node has released the
+// key, the key is no longer found by Get()
+// 3. If the node goes down, all slave keys of that node are removed after
+// the TTL expires (auto release).
+type Allocator struct {
+ // events is a channel which will receive AllocatorEvent as IDs are
+ // added, modified or removed from the allocator
+ events AllocatorEventSendChan
+
+ // keyType is an instance of the type to be used as allocator key.
+ keyType AllocatorKey
+
+ // min is the lower limit when allocating IDs. The allocator will never
+ // allocate an ID lesser than this value.
+ min idpool.ID
+
+ // max is the upper limit when allocating IDs. The allocator will never
+ // allocate an ID greater than this value.
+ max idpool.ID
+
+ // prefixMask if set, will be ORed to all selected IDs prior to
+ // allocation
+ prefixMask idpool.ID
+
+ // localKeys contains all keys including their reference count for keys
+ // which have been allocated and are in local use
+ localKeys *localKeys
+
+ // suffix is the suffix attached to keys which must be node specific,
+ // this is typical set to the node's IP address
+ suffix string
+
+ // backoffTemplate is the backoff configuration while allocating
+ backoffTemplate backoff.Exponential
+
+ // slaveKeysMutex protects the concurrent access of the slave key by this
+ // agent.
+ slaveKeysMutex lock.Mutex
+
+ // mainCache is the main cache, representing the allocator contents of
+ // the primary kvstore connection
+ mainCache cache
+
+	// remoteCachesMutex protects accesses to remoteCaches
+ remoteCachesMutex lock.RWMutex
+
+ // remoteCaches is the list of additional remote caches being watched
+ // in addition to the main cache
+ remoteCaches map[string]*RemoteCache
+
+ // stopGC is the channel used to stop the garbage collector
+ stopGC chan struct{}
+
+ // initialListDone is a channel that is closed when the initial
+ // synchronization has completed
+ initialListDone waitChan
+
+ // idPool maintains a pool of available ids for allocation.
+ idPool idpool.IDPool
+
+ // enableMasterKeyProtection if true, causes master keys that are still in
+ // local use to be automatically re-created
+ enableMasterKeyProtection bool
+
+ // disableGC disables the garbage collector
+ disableGC bool
+
+ // disableAutostart prevents starting the allocator when it is initialized
+ disableAutostart bool
+
+	// backend is the upstream, shared, backend to which we synchronize local
+ // information
+ backend Backend
+}
+
+// AllocatorOption is the base type for allocator options
+type AllocatorOption func(*Allocator)
+
+// NewAllocatorForGC returns an allocator that can be used to run RunGC()
+//
+// The allocator can be configured by passing in additional options:
+// - WithMin(id) - minimum ID to allocate (default: 1)
+// - WithMax(id) - maximum ID to allocate (default max(uint64))
+func NewAllocatorForGC(backend Backend, opts ...AllocatorOption) *Allocator {
+ a := &Allocator{
+ backend: backend,
+ min: idpool.ID(1),
+ max: idpool.ID(^uint64(0)),
+ }
+
+ for _, fn := range opts {
+ fn(a)
+ }
+
+ return a
+}
+
+type GCStats struct {
+ // Alive is the number of identities alive
+ Alive int
+
+ // Deleted is the number of identities deleted
+ Deleted int
+}
+
+// Backend represents clients to remote ID allocation systems, such as KV
+// Stores. These are used to coordinate key->ID allocation between cilium
+// nodes.
+type Backend interface {
+ // DeleteAllKeys will delete all keys. It is used in tests.
+ DeleteAllKeys(ctx context.Context)
+
+ // Encode encodes a key string as required to conform to the key
+ // restrictions of the backend
+ Encode(string) string
+
+ // AllocateID creates a new key->ID association. This is expected to be a
+ // create-only operation, and the ID may be allocated by another node. An
+ // error in that case is not expected to be fatal. The actual ID is obtained
+ // by Allocator from the local idPool, which is updated with used-IDs as the
+ // Backend makes calls to the handler in ListAndWatch.
+ AllocateID(ctx context.Context, id idpool.ID, key AllocatorKey) error
+
+ // AllocateIDIfLocked behaves like AllocateID but when lock is non-nil the
+ // operation proceeds only if it is still valid.
+ AllocateIDIfLocked(ctx context.Context, id idpool.ID, key AllocatorKey, lock kvstore.KVLocker) error
+
+ // AcquireReference records that this node is using this key->ID mapping.
+ // This is distinct from any reference counting within this agent; only one
+ // reference exists for this node for any number of managed endpoints using
+ // it.
+ // The semantics of cleaning up stale references is delegated to the Backend
+ // implementation. RunGC may need to be invoked.
+ // This can race, and so lock can be provided (via a Lock call, below).
+ AcquireReference(ctx context.Context, id idpool.ID, key AllocatorKey, lock kvstore.KVLocker) error
+
+ // Release releases the use of an ID associated with the provided key. It
+	// does not guard against concurrent calls to Release for the same key;
+	// callers are responsible for serializing releases of a given key.
+ Release(ctx context.Context, id idpool.ID, key AllocatorKey) (err error)
+
+ // UpdateKey refreshes the record that this node is using this key -> id
+ // mapping. When reliablyMissing is set it will also recreate missing master or
+ // slave keys.
+ UpdateKey(ctx context.Context, id idpool.ID, key AllocatorKey, reliablyMissing bool) error
+
+ // UpdateKeyIfLocked behaves like UpdateKey but when lock is non-nil the operation proceeds only if it is still valid.
+ UpdateKeyIfLocked(ctx context.Context, id idpool.ID, key AllocatorKey, reliablyMissing bool, lock kvstore.KVLocker) error
+
+ // Get returns the allocated ID for this key as seen by the Backend. This may
+ // have been created by other agents.
+ Get(ctx context.Context, key AllocatorKey) (idpool.ID, error)
+
+	// GetIfLocked behaves like Get, but when lock is non-nil the
+ // operation proceeds only if it is still valid.
+ GetIfLocked(ctx context.Context, key AllocatorKey, lock kvstore.KVLocker) (idpool.ID, error)
+
+ // GetByID returns the key associated with this ID, as seen by the Backend.
+ // This may have been created by other agents.
+ GetByID(ctx context.Context, id idpool.ID) (AllocatorKey, error)
+
+ // Lock provides an opaque lock object that can be used, later, to ensure
+ // that the key has not changed since the lock was created. This can be done
+ // with GetIfLocked.
+ Lock(ctx context.Context, key AllocatorKey) (kvstore.KVLocker, error)
+
+ // ListAndWatch begins synchronizing the local Backend instance with its
+ // remote.
+ ListAndWatch(ctx context.Context, handler CacheMutations, stopChan chan struct{})
+
+ // RunGC reaps stale or unused identities within the Backend and makes them
+ // available for reuse. It is used by the cilium-operator and is not invoked
+ // by cilium-agent.
+	// Note: not all Backend implementations rely on this, such as the kvstore
+ // backends, and may use leases to expire keys.
+ RunGC(ctx context.Context, rateLimit *rate.Limiter, staleKeysPrevRound map[string]uint64, minID idpool.ID, maxID idpool.ID) (map[string]uint64, *GCStats, error)
+
+ // RunLocksGC reaps stale or unused locks within the Backend. It is used by
+ // the cilium-operator and is not invoked by cilium-agent. Returns
+ // a map of locks currently being held in the KVStore including the ones
+ // that failed to be GCed.
+ // Note: not all Backend implementations rely on this, such as the kvstore
+ // backends, and may use leases to expire keys.
+ RunLocksGC(ctx context.Context, staleKeysPrevRound map[string]kvstore.Value) (map[string]kvstore.Value, error)
+
+ // Status returns a human-readable status of the Backend.
+ Status() (string, error)
+}
+
+// NewAllocator creates a new Allocator. Any type can be used as key as long as
+// the type implements the AllocatorKey interface. A variable of the type has
+// to be passed into NewAllocator() to make the type known. The specified base
+// path is used to prefix all keys in the kvstore. The provided path must be
+// unique.
+//
+// The allocator can be configured by passing in additional options:
+// - WithEvents() - enable Events channel
+// - WithMin(id) - minimum ID to allocate (default: 1)
+// - WithMax(id) - maximum ID to allocate (default max(uint64))
+//
+// After creation, IDs can be allocated with Allocate() and released with
+// Release()
+func NewAllocator(typ AllocatorKey, backend Backend, opts ...AllocatorOption) (*Allocator, error) {
+ a := &Allocator{
+ keyType: typ,
+ backend: backend,
+ min: idpool.ID(1),
+ max: idpool.ID(^uint64(0)),
+ localKeys: newLocalKeys(),
+ stopGC: make(chan struct{}),
+ suffix: uuid.New().String()[:10],
+ remoteCaches: map[string]*RemoteCache{},
+ backoffTemplate: backoff.Exponential{
+ Min: time.Duration(20) * time.Millisecond,
+ Factor: 2.0,
+ },
+ }
+
+ for _, fn := range opts {
+ fn(a)
+ }
+
+ a.mainCache = newCache(a)
+
+ if a.suffix == "" {
+ return nil, errors.New("allocator suffix is and unlikely unique")
+ }
+
+ if a.min < 1 {
+ return nil, errors.New("minimum ID must be >= 1")
+ }
+
+ if a.max <= a.min {
+ return nil, fmt.Errorf("maximum ID must be greater than minimum ID: configured max %v, min %v", a.max, a.min)
+ }
+
+ a.idPool = idpool.NewIDPool(a.min, a.max)
+
+ if !a.disableAutostart {
+ a.start()
+ }
+
+ return a, nil
+}
+
+func (a *Allocator) start() {
+ a.initialListDone = a.mainCache.start()
+ if !a.disableGC {
+ go func() {
+ select {
+ case <-a.initialListDone:
+ case <-time.After(option.Config.AllocatorListTimeout):
+ log.Fatalf("Timeout while waiting for initial allocator state")
+ }
+ a.startLocalKeySync()
+ }()
+ }
+}
+
+// WithBackend sets this allocator to use backend. It is expected to be used at
+// initialization.
+func WithBackend(backend Backend) AllocatorOption {
+ return func(a *Allocator) {
+ a.backend = backend
+ }
+}
+
+// WithEvents enables receiving of events.
+//
+// CAUTION: When using this function. The provided channel must be continuously
+// read while NewAllocator() is being called to ensure that the channel does
+// not block indefinitely while NewAllocator() emits events on it while
+// populating the initial cache.
+func WithEvents(events AllocatorEventSendChan) AllocatorOption {
+ return func(a *Allocator) { a.events = events }
+}
+
+// WithMin sets the minimum identifier to be allocated
+func WithMin(id idpool.ID) AllocatorOption {
+ return func(a *Allocator) { a.min = id }
+}
+
+// WithMax sets the maximum identifier to be allocated
+func WithMax(id idpool.ID) AllocatorOption {
+ return func(a *Allocator) { a.max = id }
+}
+
+// WithPrefixMask sets the prefix used for all ID allocations. If set, the mask
+// will be ORed to all selected IDs prior to allocation. It is the
+// responsibility of the caller to ensure that the mask is not conflicting with
+// min..max.
+func WithPrefixMask(mask idpool.ID) AllocatorOption {
+ return func(a *Allocator) { a.prefixMask = mask }
+}
+
+// WithMasterKeyProtection will watch for delete events on master keys and
+// re-created them if local usage suggests that the key is still in use
+func WithMasterKeyProtection() AllocatorOption {
+ return func(a *Allocator) { a.enableMasterKeyProtection = true }
+}
+
+// WithoutGC disables the use of the garbage collector
+func WithoutGC() AllocatorOption {
+ return func(a *Allocator) { a.disableGC = true }
+}
+
+// WithoutAutostart prevents starting the allocator when it is initialized
+func WithoutAutostart() AllocatorOption {
+ return func(a *Allocator) { a.disableAutostart = true }
+}
+
+// GetEvents returns the events channel given to the allocator when
+// constructed.
+// Note: This channel is not owned by the allocator!
+func (a *Allocator) GetEvents() AllocatorEventSendChan {
+ return a.events
+}
+
+// Delete deletes an allocator and stops the garbage collector
+func (a *Allocator) Delete() {
+ close(a.stopGC)
+ a.mainCache.stop()
+}
+
+// WaitForInitialSync waits until the initial sync is complete
+func (a *Allocator) WaitForInitialSync(ctx context.Context) error {
+ select {
+ case <-a.initialListDone:
+ case <-ctx.Done():
+ return fmt.Errorf("identity sync was cancelled: %s", ctx.Err())
+ }
+
+ return nil
+}
+
+// RangeFunc is the function called by RangeCache
+type RangeFunc func(idpool.ID, AllocatorKey)
+
+// ForeachCache iterates over the allocator cache and calls RangeFunc on each
+// cached entry
+func (a *Allocator) ForeachCache(cb RangeFunc) {
+ a.mainCache.foreach(cb)
+
+ a.remoteCachesMutex.RLock()
+ for _, rc := range a.remoteCaches {
+ rc.cache.foreach(cb)
+ }
+ a.remoteCachesMutex.RUnlock()
+}
+
+// selectAvailableID selects an available ID.
+// Returns a triple of the selected ID ORed with prefixMask, the ID string and
+// the originally selected ID.
+func (a *Allocator) selectAvailableID() (idpool.ID, string, idpool.ID) {
+ if id := a.idPool.LeaseAvailableID(); id != idpool.NoID {
+ unmaskedID := id
+ id |= a.prefixMask
+ return id, id.String(), unmaskedID
+ }
+
+ return 0, "", 0
+}
+
+// AllocatorKey is the interface to implement in order for a type to be used as
+// key for the allocator. The key's data is assumed to be a collection of
+// pkg/label.Label, and the functions reflect this somewhat.
+type AllocatorKey interface {
+ fmt.Stringer
+
+ // GetKey returns the canonical string representation of the key
+ GetKey() string
+
+ // PutKey stores the information in v into the key. This is the inverse
+ // operation to GetKey
+ PutKey(v string) AllocatorKey
+
+ // GetAsMap returns the key as a collection of "labels" with a key and value.
+ // This is the inverse operation to PutKeyFromMap.
+ GetAsMap() map[string]string
+
+ // PutKeyFromMap stores the labels in v into the key to be used later. This
+ // is the inverse operation to GetAsMap.
+ PutKeyFromMap(v map[string]string) AllocatorKey
+}
+
+func (a *Allocator) encodeKey(key AllocatorKey) string {
+ return a.backend.Encode(key.GetKey())
+}
+
+// Return values:
+// 1. allocated ID
+// 2. whether the ID is newly allocated from kvstore
+// 3. whether this is the first owner that holds a reference to the key in
+// localkeys store
+// 4. error in case of failure
+func (a *Allocator) lockedAllocate(ctx context.Context, key AllocatorKey) (idpool.ID, bool, bool, error) {
+ var firstUse bool
+
+ kvstore.Trace("Allocating key in kvstore", nil, logrus.Fields{fieldKey: key})
+
+ k := a.encodeKey(key)
+ lock, err := a.backend.Lock(ctx, key)
+ if err != nil {
+ return 0, false, false, err
+ }
+
+ defer lock.Unlock(context.Background())
+
+ // fetch first key that matches /value/ while ignoring the
+ // node suffix
+ value, err := a.GetIfLocked(ctx, key, lock)
+ if err != nil {
+ return 0, false, false, err
+ }
+
+ kvstore.Trace("kvstore state is: ", nil, logrus.Fields{fieldID: value})
+
+ a.slaveKeysMutex.Lock()
+ defer a.slaveKeysMutex.Unlock()
+
+ // We shouldn't assume the fact the master key does not exist in the kvstore
+ // that localKeys does not have it. The KVStore might have lost all of its
+ // data but the local agent still holds a reference for the given master key.
+ if value == 0 {
+ value = a.localKeys.lookupKey(k)
+ if value != 0 {
+ // re-create master key
+ if err := a.backend.UpdateKeyIfLocked(ctx, value, key, true, lock); err != nil {
+ return 0, false, false, fmt.Errorf("unable to re-create missing master key '%s': %s while allocating ID: %s", key, value, err)
+ }
+ }
+ } else {
+ _, firstUse, err = a.localKeys.allocate(k, key, value)
+ if err != nil {
+ return 0, false, false, fmt.Errorf("unable to reserve local key '%s': %s", k, err)
+ }
+
+ if firstUse {
+ log.WithField(fieldKey, k).Debug("Reserved new local key")
+ } else {
+ log.WithField(fieldKey, k).Debug("Reusing existing local key")
+ }
+ }
+
+ if value != 0 {
+ log.WithField(fieldKey, k).Info("Reusing existing global key")
+
+ if err = a.backend.AcquireReference(ctx, value, key, lock); err != nil {
+ a.localKeys.release(k)
+ return 0, false, false, fmt.Errorf("unable to create slave key '%s': %s", k, err)
+ }
+
+ // mark the key as verified in the local cache
+ if err := a.localKeys.verify(k); err != nil {
+ log.WithError(err).Error("BUG: Unable to verify local key")
+ }
+
+ return value, false, firstUse, nil
+ }
+
+ log.WithField(fieldKey, k).Debug("Allocating new master ID")
+ id, strID, unmaskedID := a.selectAvailableID()
+ if id == 0 {
+ return 0, false, false, fmt.Errorf("no more available IDs in configured space")
+ }
+
+ kvstore.Trace("Selected available key ID", nil, logrus.Fields{fieldID: id})
+
+ releaseKeyAndID := func() {
+ a.localKeys.release(k)
+ a.idPool.Release(unmaskedID) // This returns this ID to be re-used for other keys
+ }
+
+ oldID, firstUse, err := a.localKeys.allocate(k, key, id)
+ if err != nil {
+ a.idPool.Release(unmaskedID)
+ return 0, false, false, fmt.Errorf("unable to reserve local key '%s': %s", k, err)
+ }
+
+ // Another local writer beat us to allocating an ID for the same key,
+ // start over
+ if id != oldID {
+ releaseKeyAndID()
+ return 0, false, false, fmt.Errorf("another writer has allocated key %s", k)
+ }
+
+ // Check that this key has not been allocated in the cluster during our
+ // operation here
+ value, err = a.GetNoCache(ctx, key)
+ if err != nil {
+ releaseKeyAndID()
+ return 0, false, false, err
+ }
+ if value != 0 {
+ releaseKeyAndID()
+ return 0, false, false, fmt.Errorf("Found master key after proceeding with new allocation for %s", k)
+ }
+
+ err = a.backend.AllocateIDIfLocked(ctx, id, key, lock)
+ if err != nil {
+ // Creation failed. Another agent most likely beat us to allocating this
+ // ID, retry.
+ releaseKeyAndID()
+ return 0, false, false, fmt.Errorf("unable to allocate ID %s for key %s: %s", strID, key, err)
+ }
+
+ // Notify pool that leased ID is now in-use.
+ a.idPool.Use(unmaskedID)
+
+ if err = a.backend.AcquireReference(ctx, id, key, lock); err != nil {
+ // We will leak the master key here as the key has already been
+ // exposed and may be in use by other nodes. The garbage
+ // collector will release it again.
+ releaseKeyAndID()
+ return 0, false, false, fmt.Errorf("slave key creation failed '%s': %s", k, err)
+ }
+
+ // mark the key as verified in the local cache
+ if err := a.localKeys.verify(k); err != nil {
+ log.WithError(err).Error("BUG: Unable to verify local key")
+ }
+
+ log.WithField(fieldKey, k).Info("Allocated new global key")
+
+ return id, true, firstUse, nil
+}
+
+// Allocate will retrieve the ID for the provided key. If no ID has been
+// allocated for this key yet, a key will be allocated. If allocation fails,
+// most likely due to a parallel allocation of the same ID by another user,
+// allocation is re-attempted for maxAllocAttempts times.
+//
+// Return values:
+// 1. allocated ID
+// 2. whether the ID is newly allocated from kvstore
+// 3. whether this is the first owner that holds a reference to the key in
+// localkeys store
+// 4. error in case of failure
+func (a *Allocator) Allocate(ctx context.Context, key AllocatorKey) (idpool.ID, bool, bool, error) {
+ var (
+ err error
+ value idpool.ID
+ isNew bool
+ firstUse bool
+ k = a.encodeKey(key)
+ )
+
+ log.WithField(fieldKey, key).Debug("Allocating key")
+
+ select {
+ case <-a.initialListDone:
+ case <-ctx.Done():
+ return 0, false, false, fmt.Errorf("allocation was cancelled while waiting for initial key list to be received: %s", ctx.Err())
+ }
+
+ kvstore.Trace("Allocating from kvstore", nil, logrus.Fields{fieldKey: key})
+
+ // make a copy of the template and customize it
+ boff := a.backoffTemplate
+ boff.Name = key.String()
+
+ for attempt := 0; attempt < maxAllocAttempts; attempt++ {
+ // Check our list of local keys already in use and increment the
+ // refcnt. The returned key must be released afterwards. No kvstore
+ // operation was performed for this allocation.
+ // We also do this on every loop as a different Allocate call might have
+ // allocated the key while we are attempting to allocate in this
+ // execution thread. It does not hurt to check if localKeys contains a
+ // reference for the key that we are attempting to allocate.
+ if val := a.localKeys.use(k); val != idpool.NoID {
+ kvstore.Trace("Reusing local id", nil, logrus.Fields{fieldID: val, fieldKey: key})
+ a.mainCache.insert(key, val)
+ return val, false, false, nil
+ }
+
+ // FIXME: Add non-locking variant
+ value, isNew, firstUse, err = a.lockedAllocate(ctx, key)
+ if err == nil {
+ a.mainCache.insert(key, value)
+ log.WithField(fieldKey, key).WithField(fieldID, value).Debug("Allocated key")
+ return value, isNew, firstUse, nil
+ }
+
+ scopedLog := log.WithFields(logrus.Fields{
+ fieldKey: key,
+ logfields.Attempt: attempt,
+ })
+
+ select {
+ case <-ctx.Done():
+ scopedLog.WithError(ctx.Err()).Warning("Ongoing key allocation has been cancelled")
+ return 0, false, false, fmt.Errorf("key allocation cancelled: %s", ctx.Err())
+ default:
+ scopedLog.WithError(err).Warning("Key allocation attempt failed")
+ }
+
+ kvstore.Trace("Allocation attempt failed", err, logrus.Fields{fieldKey: key, logfields.Attempt: attempt})
+
+ if waitErr := boff.Wait(ctx); waitErr != nil {
+ return 0, false, false, waitErr
+ }
+ }
+
+ return 0, false, false, err
+}
+
+// GetIfLocked returns the ID which is allocated to a key. Returns an ID of NoID if no ID
+// has been allocated to this key yet, while the client is still holding the given
+// lock.
+func (a *Allocator) GetIfLocked(ctx context.Context, key AllocatorKey, lock kvstore.KVLocker) (idpool.ID, error) {
+ if id := a.mainCache.get(a.encodeKey(key)); id != idpool.NoID {
+ return id, nil
+ }
+
+ return a.backend.GetIfLocked(ctx, key, lock)
+}
+
+// Get returns the ID which is allocated to a key. Returns an ID of NoID if no ID
+// has been allocated to this key yet.
+func (a *Allocator) Get(ctx context.Context, key AllocatorKey) (idpool.ID, error) {
+ if id := a.mainCache.get(a.encodeKey(key)); id != idpool.NoID {
+ return id, nil
+ }
+
+ return a.GetNoCache(ctx, key)
+}
+
+// GetNoCache returns the ID which is allocated to a key in the kvstore,
+// bypassing the local copy of allocated keys.
+func (a *Allocator) GetNoCache(ctx context.Context, key AllocatorKey) (idpool.ID, error) {
+ return a.backend.Get(ctx, key)
+}
+
+// GetByID returns the key associated with an ID. Returns nil if no key is
+// associated with the ID.
+func (a *Allocator) GetByID(ctx context.Context, id idpool.ID) (AllocatorKey, error) {
+ if key := a.mainCache.getByID(id); key != nil {
+ return key, nil
+ }
+
+ return a.backend.GetByID(ctx, id)
+}
+
+// GetIncludeRemoteCaches returns the ID which is allocated to a key. Includes the
+// caches of watched remote kvstores in the query. Returns an ID of NoID if no
+// ID has been allocated in any remote kvstore to this key yet.
+func (a *Allocator) GetIncludeRemoteCaches(ctx context.Context, key AllocatorKey) (idpool.ID, error) {
+ encoded := a.encodeKey(key)
+
+ // check main cache first
+ if id := a.mainCache.get(encoded); id != idpool.NoID {
+ return id, nil
+ }
+
+ // check remote caches
+ a.remoteCachesMutex.RLock()
+ for _, rc := range a.remoteCaches {
+ if id := rc.cache.get(encoded); id != idpool.NoID {
+ a.remoteCachesMutex.RUnlock()
+ return id, nil
+ }
+ }
+ a.remoteCachesMutex.RUnlock()
+
+ // check main backend
+ if id, err := a.backend.Get(ctx, key); id != idpool.NoID || err != nil {
+ return id, err
+ }
+
+ // we skip checking remote backends explicitly here, to avoid
+ // accidentally overloading them in case of lookups for invalid identities
+
+ return idpool.NoID, nil
+}
+
+// GetByIDIncludeRemoteCaches returns the key associated with an ID. Includes
+// the caches of watched remote kvstores in the query.
+// Returns nil if no key is associated with the ID.
+func (a *Allocator) GetByIDIncludeRemoteCaches(ctx context.Context, id idpool.ID) (AllocatorKey, error) {
+ // check main cache first
+ if key := a.mainCache.getByID(id); key != nil {
+ return key, nil
+ }
+
+ // check remote caches
+ a.remoteCachesMutex.RLock()
+ for _, rc := range a.remoteCaches {
+ if key := rc.cache.getByID(id); key != nil {
+ a.remoteCachesMutex.RUnlock()
+ return key, nil
+ }
+ }
+ a.remoteCachesMutex.RUnlock()
+
+ // check main backend
+ if key, err := a.backend.GetByID(ctx, id); key != nil || err != nil {
+ return key, err
+ }
+
+ // we skip checking remote backends explicitly here, to avoid
+ // accidentally overloading them in case of lookups for invalid identities
+
+ return nil, nil
+}
+
+// Release releases the use of an ID associated with the provided key. After
+// the last user has released the ID, the key is removed in the KVstore and
+// the returned lastUse value is true.
+func (a *Allocator) Release(ctx context.Context, key AllocatorKey) (lastUse bool, err error) {
+ log.WithField(fieldKey, key).Info("Releasing key")
+
+ select {
+ case <-a.initialListDone:
+ case <-ctx.Done():
+ return false, fmt.Errorf("release was cancelled while waiting for initial key list to be received: %s", ctx.Err())
+ }
+
+ k := a.encodeKey(key)
+
+ a.slaveKeysMutex.Lock()
+ defer a.slaveKeysMutex.Unlock()
+
+ // release the key locally, if it was the last use, remove the node
+ // specific value key to remove the global reference mark
+ var id idpool.ID
+ lastUse, id, err = a.localKeys.release(k)
+ if err != nil {
+ return lastUse, err
+ }
+ if lastUse {
+ // Since in CRD mode we don't have a way to map which identity is being
+ // used by a node, we need to also pass the ID to the release function.
+ // This allows the CRD store to find the right identity by its ID and
+ // remove the node reference on that identity.
+ a.backend.Release(ctx, id, key)
+ }
+
+ return lastUse, err
+}
+
+// RunGC scans the kvstore for unused master keys and removes them
+func (a *Allocator) RunGC(rateLimit *rate.Limiter, staleKeysPrevRound map[string]uint64) (map[string]uint64, *GCStats, error) {
+ return a.backend.RunGC(context.TODO(), rateLimit, staleKeysPrevRound, a.min, a.max)
+}
+
+// RunLocksGC scans the kvstore for stale locks and removes them
+func (a *Allocator) RunLocksGC(ctx context.Context, staleLocksPrevRound map[string]kvstore.Value) (map[string]kvstore.Value, error) {
+ return a.backend.RunLocksGC(ctx, staleLocksPrevRound)
+}
+
+// DeleteAllKeys will delete all keys. It is expected to be used in tests.
+func (a *Allocator) DeleteAllKeys() {
+ a.backend.DeleteAllKeys(context.TODO())
+}
+
+// syncLocalKeys checks the kvstore and verifies that a master key exists for
+// all locally used allocations. This will restore master keys if deleted for
+// some reason.
+func (a *Allocator) syncLocalKeys() error {
+ // Create a local copy of all local allocations to not require to hold
+ // any locks while performing kvstore operations. Local use can
+ // disappear while we perform the sync but that is fine as worst case,
+ // a master key is created for a slave key that no longer exists. The
+ // garbage collector will remove it again.
+ ids := a.localKeys.getVerifiedIDs()
+
+ for id, value := range ids {
+ if err := a.backend.UpdateKey(context.TODO(), id, value, false); err != nil {
+ log.WithError(err).WithFields(logrus.Fields{
+ fieldKey: value,
+ fieldID: id,
+ }).Warning("Unable to sync key")
+ }
+ }
+
+ return nil
+}
+
+func (a *Allocator) startLocalKeySync() {
+ go func(a *Allocator) {
+ kvTimer, kvTimerDone := inctimer.New()
+ defer kvTimerDone()
+ for {
+ if err := a.syncLocalKeys(); err != nil {
+ log.WithError(err).Warning("Unable to run local key sync routine")
+ }
+
+ select {
+ case <-a.stopGC:
+ log.Debug("Stopped master key sync routine")
+ return
+ case <-kvTimer.After(option.Config.KVstorePeriodicSync):
+ }
+ }
+ }(a)
+}
+
+// AllocatorEventChan is a channel to receive allocator events on
+type AllocatorEventChan chan AllocatorEvent
+
+// Send- and receive-only versions of the above.
+type AllocatorEventRecvChan = <-chan AllocatorEvent
+type AllocatorEventSendChan = chan<- AllocatorEvent
+
+// AllocatorEvent is an event sent over AllocatorEventChan
+type AllocatorEvent struct {
+ // Typ is the type of event (create / modify / delete)
+ Typ kvstore.EventType
+
+ // ID is the allocated ID
+ ID idpool.ID
+
+ // Key is the key associated with the ID
+ Key AllocatorKey
+}
+
+// RemoteCache represents the cache content of an additional kvstore managing
+// identities. The contents are not directly accessible but will be merged into
+// the ForeachCache() function.
+type RemoteCache struct {
+ name string
+
+ allocator *Allocator
+ cache *cache
+
+ watchFunc func(ctx context.Context, remote *RemoteCache, onSync func(context.Context))
+}
+
+func (a *Allocator) NewRemoteCache(remoteName string, remoteAlloc *Allocator) *RemoteCache {
+ return &RemoteCache{
+ name: remoteName,
+ allocator: remoteAlloc,
+ cache: &remoteAlloc.mainCache,
+
+ watchFunc: a.WatchRemoteKVStore,
+ }
+}
+
+// WatchRemoteKVStore starts watching an allocator base prefix the kvstore
+// represented by the provided backend. A local cache of all identities of that
+// kvstore will be maintained in the RemoteCache structure returned and will
+// start being reported in the identities returned by the ForeachCache()
+// function. RemoteName should be unique per logical "remote".
+func (a *Allocator) WatchRemoteKVStore(ctx context.Context, rc *RemoteCache, onSync func(context.Context)) {
+ scopedLog := log.WithField(logfields.ClusterName, rc.name)
+ scopedLog.Info("Starting remote kvstore watcher")
+
+ rc.allocator.start()
+
+ select {
+ case <-ctx.Done():
+ scopedLog.Debug("Context canceled before remote kvstore watcher synchronization completed: stale identities will now be drained")
+ rc.close()
+
+ a.remoteCachesMutex.RLock()
+ old := a.remoteCaches[rc.name]
+ a.remoteCachesMutex.RUnlock()
+
+ if old != nil {
+ old.cache.mutex.RLock()
+ defer old.cache.mutex.RUnlock()
+ }
+
+ // Drain all entries that might have been received until now, and that
+ // are not present in the current cache (if any). This ensures we do not
+ // leak any stale identity, and at the same time we do not invalidate the
+ // current state.
+ rc.cache.drainIf(func(id idpool.ID) bool {
+ if old == nil {
+ return true
+ }
+
+ _, ok := old.cache.nextCache[id]
+ return !ok
+ })
+ return
+
+ case <-rc.cache.listDone:
+ scopedLog.Info("Remote kvstore watcher successfully synchronized and registered")
+ }
+
+ a.remoteCachesMutex.Lock()
+ old := a.remoteCaches[rc.name]
+ a.remoteCaches[rc.name] = rc
+ a.remoteCachesMutex.Unlock()
+
+ if old != nil {
+ // In case of reconnection, let's emit a deletion event for all stale identities
+ // that are no longer present in the kvstore. We take the lock of the new cache
+ // to ensure that we observe a stable state during this process (i.e., no keys
+ // are added/removed in the meanwhile).
+ scopedLog.Debug("Another kvstore watcher was already registered: deleting stale identities")
+ rc.cache.mutex.RLock()
+ old.cache.drainIf(func(id idpool.ID) bool {
+ _, ok := rc.cache.nextCache[id]
+ return !ok
+ })
+ rc.cache.mutex.RUnlock()
+ }
+
+ // Execute the on-sync callback handler.
+ onSync(ctx)
+
+ <-ctx.Done()
+ rc.close()
+ scopedLog.Info("Stopped remote kvstore watcher")
+}
+
+// RemoveRemoteKVStore removes any reference to a remote allocator / kvstore, emitting
+// a deletion event for all previously known identities.
+func (a *Allocator) RemoveRemoteKVStore(remoteName string) {
+ a.remoteCachesMutex.Lock()
+ old := a.remoteCaches[remoteName]
+ delete(a.remoteCaches, remoteName)
+ a.remoteCachesMutex.Unlock()
+
+ if old != nil {
+ old.cache.drain()
+ log.WithField(logfields.ClusterName, remoteName).Info("Remote kvstore watcher unregistered")
+ }
+}
+
+// Watch starts watching the remote kvstore and synchronize the identities in
+// the local cache. It blocks until the context is closed.
+func (rc *RemoteCache) Watch(ctx context.Context, onSync func(context.Context)) {
+ rc.watchFunc(ctx, rc, onSync)
+}
+
+// NumEntries returns the number of entries in the remote cache
+func (rc *RemoteCache) NumEntries() int {
+ if rc == nil {
+ return 0
+ }
+
+ return rc.cache.numEntries()
+}
+
+// Synced returns whether the initial list of entries has been retrieved from
+// the kvstore, and new events are currently being watched.
+func (rc *RemoteCache) Synced() bool {
+ if rc == nil {
+ return false
+ }
+
+ select {
+ case <-rc.cache.stopChan:
+ return false
+ default:
+ select {
+ case <-rc.cache.listDone:
+ return true
+ default:
+ return false
+ }
+ }
+}
+
+// close stops watching for identities in the kvstore associated with the
+// remote cache.
+func (rc *RemoteCache) close() {
+ rc.cache.allocator.Delete()
+}
+
+// Observe the identity changes. Conforms to stream.Observable.
+// Replays the current state of the cache when subscribing.
+func (a *Allocator) Observe(ctx context.Context, next func(AllocatorChange), complete func(error)) {
+ a.mainCache.Observe(ctx, next, complete)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/allocator/cache.go b/vendor/github.com/cilium/cilium/pkg/allocator/cache.go
new file mode 100644
index 000000000..509ec4eef
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/allocator/cache.go
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package allocator
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "github.com/sirupsen/logrus"
+
+ "github.com/cilium/cilium/pkg/idpool"
+ "github.com/cilium/cilium/pkg/kvstore"
+ "github.com/cilium/cilium/pkg/lock"
+ "github.com/cilium/cilium/pkg/stream"
+)
+
+// backendOpTimeout is the time allowed for operations sent to backends in
+// response to events such as create/modify/delete.
+const backendOpTimeout = 10 * time.Second
+
+// idMap provides mapping from ID to an AllocatorKey
+type idMap map[idpool.ID]AllocatorKey
+
+// keyMap provides mapping from AllocatorKey to ID
+type keyMap map[string]idpool.ID
+
+type cache struct {
+ allocator *Allocator
+
+ stopChan chan struct{}
+
+ // mutex protects all cache data structures
+ mutex lock.RWMutex
+
+ // cache is a local cache of all IDs allocated in the kvstore. It is
+ // being maintained by watching for kvstore events and can thus lag
+ // behind.
+ cache idMap
+
+ // keyCache shadows cache and allows access by key
+ keyCache keyMap
+
+ // nextCache is the cache that is constantly being filled by startWatch(),
+ // when startWatch has successfully performed the initial fill using
+ // ListPrefix, the cache above will be pointed to nextCache. If the
+ // startWatch() fails to perform the initial list, then the cache is
+ // never pointed to nextCache. This guarantees that a valid cache is
+ // kept at all times.
+ nextCache idMap
+
+ // nextKeyCache follows the same logic as nextCache but for keyCache
+ nextKeyCache keyMap
+
+ listDone waitChan
+
+ // stopWatchWg is a wait group that gets conditions added when a
+ // watcher is started with the conditions marked as done when the
+ // watcher has exited
+ stopWatchWg sync.WaitGroup
+
+ changeSrc stream.Observable[AllocatorChange]
+ emitChange func(AllocatorChange)
+ completeChangeSrc func(error)
+}
+
+func newCache(a *Allocator) (c cache) {
+ c = cache{
+ allocator: a,
+ cache: idMap{},
+ keyCache: keyMap{},
+ stopChan: make(chan struct{}),
+ }
+ c.changeSrc, c.emitChange, c.completeChangeSrc = stream.Multicast[AllocatorChange]()
+ return
+}
+
+type waitChan chan struct{}
+
+// CacheMutations are the operations given to a Backend's ListAndWatch command.
+// They are called on changes to identities.
+type CacheMutations interface {
+ // OnListDone is called when the initial full-sync is complete.
+ OnListDone()
+
+ // OnAdd is called when a new key->ID appears.
+ OnAdd(id idpool.ID, key AllocatorKey)
+
+ // OnModify is called when a key->ID mapping is modified. This may happen
+ // when leases are updated, and does not mean the actual mapping had changed.
+ OnModify(id idpool.ID, key AllocatorKey)
+
+ // OnDelete is called when a key->ID mapping is removed. This may trigger
+ // master-key protection, if enabled, where the local allocator will recreate
+ // the key->ID association is recreated because the local node is still using
+ // it.
+ OnDelete(id idpool.ID, key AllocatorKey)
+}
+
+func (c *cache) sendEvent(typ kvstore.EventType, id idpool.ID, key AllocatorKey) {
+ if events := c.allocator.events; events != nil {
+ events <- AllocatorEvent{Typ: typ, ID: id, Key: key}
+ }
+}
+
+func (c *cache) OnListDone() {
+ c.mutex.Lock()
+ // nextCache is valid, point the live cache to it
+ c.cache = c.nextCache
+ c.keyCache = c.nextKeyCache
+ c.mutex.Unlock()
+
+ log.Debug("Initial list of identities received")
+
+ // report that the list operation has
+ // been completed and the allocator is
+ // ready to use
+ close(c.listDone)
+}
+
+func (c *cache) OnAdd(id idpool.ID, key AllocatorKey) {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+
+ c.nextCache[id] = key
+ if key != nil {
+ c.nextKeyCache[c.allocator.encodeKey(key)] = id
+ }
+ c.allocator.idPool.Remove(id)
+
+ c.emitChange(AllocatorChange{Kind: AllocatorChangeUpsert, ID: id, Key: key})
+
+ c.sendEvent(kvstore.EventTypeCreate, id, key)
+}
+
+func (c *cache) OnModify(id idpool.ID, key AllocatorKey) {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+
+ if k, ok := c.nextCache[id]; ok {
+ delete(c.nextKeyCache, c.allocator.encodeKey(k))
+ }
+
+ c.nextCache[id] = key
+ if key != nil {
+ c.nextKeyCache[c.allocator.encodeKey(key)] = id
+ }
+
+ c.emitChange(AllocatorChange{Kind: AllocatorChangeUpsert, ID: id, Key: key})
+
+ c.sendEvent(kvstore.EventTypeModify, id, key)
+}
+
+func (c *cache) OnDelete(id idpool.ID, key AllocatorKey) {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+
+ c.onDeleteLocked(id, key)
+}
+
+// onDeleteLocked must be called while holding c.Mutex for writing
+func (c *cache) onDeleteLocked(id idpool.ID, key AllocatorKey) {
+ a := c.allocator
+ if a.enableMasterKeyProtection {
+ if value := a.localKeys.lookupID(id); value != nil {
+ ctx, cancel := context.WithTimeout(context.TODO(), backendOpTimeout)
+ defer cancel()
+ err := a.backend.UpdateKey(ctx, id, value, true)
+ if err != nil {
+ log.WithError(err).Errorf("OnDelete MasterKeyProtection update for key %q", id)
+ }
+ return
+ }
+ }
+
+ if k, ok := c.nextCache[id]; ok && k != nil {
+ delete(c.nextKeyCache, c.allocator.encodeKey(k))
+ }
+
+ delete(c.nextCache, id)
+ a.idPool.Insert(id)
+
+ c.emitChange(AllocatorChange{Kind: AllocatorChangeDelete, ID: id, Key: key})
+
+ c.sendEvent(kvstore.EventTypeDelete, id, key)
+}
+
+// start requests a LIST operation from the kvstore and starts watching the
+// prefix in a go subroutine.
+func (c *cache) start() waitChan {
+ c.listDone = make(waitChan)
+
+ c.mutex.Lock()
+
+ // start with a fresh nextCache
+ c.nextCache = idMap{}
+ c.nextKeyCache = keyMap{}
+ c.mutex.Unlock()
+
+ c.stopWatchWg.Add(1)
+
+ go func() {
+ c.allocator.backend.ListAndWatch(context.TODO(), c, c.stopChan)
+ c.stopWatchWg.Done()
+ }()
+
+ return c.listDone
+}
+
+func (c *cache) stop() {
+ close(c.stopChan)
+ c.stopWatchWg.Wait()
+ c.completeChangeSrc(nil)
+}
+
+// drain emits a deletion event for all known IDs. It must be called after the
+// cache has been stopped, to ensure that no new events can be received afterwards.
+func (c *cache) drain() {
+ // Make sure we wait until the watch loop has been properly stopped.
+ c.stopWatchWg.Wait()
+
+ c.mutex.Lock()
+ for id, key := range c.nextCache {
+ c.onDeleteLocked(id, key)
+ }
+ c.mutex.Unlock()
+}
+
+// drainIf emits a deletion event for all known IDs that are stale according to
+// the isStale function. It must be called after the cache has been stopped, to
+// ensure that no new events can be received afterwards.
+func (c *cache) drainIf(isStale func(id idpool.ID) bool) {
+ // Make sure we wait until the watch loop has been properly stopped, otherwise
+ // new IDs might be added after we complete the draining process.
+ c.stopWatchWg.Wait()
+
+ c.mutex.Lock()
+ for id, key := range c.nextCache {
+ if isStale(id) {
+ c.onDeleteLocked(id, key)
+ log.WithFields(logrus.Fields{fieldID: id, fieldKey: key}).
+ Debug("Stale identity deleted")
+ }
+ }
+ c.mutex.Unlock()
+}
+
+func (c *cache) get(key string) idpool.ID {
+ c.mutex.RLock()
+ if id, ok := c.keyCache[key]; ok {
+ c.mutex.RUnlock()
+ return id
+ }
+ c.mutex.RUnlock()
+
+ return idpool.NoID
+}
+
+func (c *cache) getByID(id idpool.ID) AllocatorKey {
+ c.mutex.RLock()
+ if v, ok := c.cache[id]; ok {
+ c.mutex.RUnlock()
+ return v
+ }
+ c.mutex.RUnlock()
+
+ return nil
+}
+
+func (c *cache) foreach(cb RangeFunc) {
+ c.mutex.RLock()
+ for k, v := range c.cache {
+ cb(k, v)
+ }
+ c.mutex.RUnlock()
+}
+
+func (c *cache) insert(key AllocatorKey, val idpool.ID) {
+ c.mutex.Lock()
+ c.nextCache[val] = key
+ c.nextKeyCache[c.allocator.encodeKey(key)] = val
+ c.mutex.Unlock()
+}
+
+func (c *cache) numEntries() int {
+ c.mutex.RLock()
+ defer c.mutex.RUnlock()
+ return len(c.nextCache)
+}
+
+type AllocatorChangeKind string
+
+const (
+ AllocatorChangeSync AllocatorChangeKind = "sync"
+ AllocatorChangeUpsert AllocatorChangeKind = "upsert"
+ AllocatorChangeDelete AllocatorChangeKind = "delete"
+)
+
+type AllocatorChange struct {
+ Kind AllocatorChangeKind
+ ID idpool.ID
+ Key AllocatorKey
+}
+
+// Observe the allocator changes. Conforms to stream.Observable.
+// Replays the current state of the cache when subscribing.
+func (c *cache) Observe(ctx context.Context, next func(AllocatorChange), complete func(error)) {
+ // This short-lived go routine serves the purpose of replaying the current state of the cache before starting
+ // to observe the actual source changeSrc. ChangeSrc is backed by a stream.FuncObservable, that will start its own
+ // go routine. Therefore, the current go routine will stop and free the lock on the mutex after the registration.
+ go func() {
+ // Wait until initial listing has completed before
+ // replaying the state.
+ select {
+ case <-c.listDone:
+ case <-ctx.Done():
+ complete(ctx.Err())
+ return
+ }
+
+ c.mutex.RLock()
+ defer c.mutex.RUnlock()
+
+ for id, key := range c.cache {
+ next(AllocatorChange{Kind: AllocatorChangeUpsert, ID: id, Key: key})
+ }
+
+ // Emit a sync event to inform the subscriber that it has received a consistent
+ // initial state.
+ next(AllocatorChange{Kind: AllocatorChangeSync})
+
+ // And subscribe to new events. Since we held the read-lock there won't be any
+ // missed or duplicate events.
+ c.changeSrc.Observe(ctx, next, complete)
+ }()
+
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/allocator/doc.go b/vendor/github.com/cilium/cilium/pkg/allocator/doc.go
new file mode 100644
index 000000000..26c9a9aad
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/allocator/doc.go
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Package allocator provides a kvstore based ID allocator
+package allocator
diff --git a/vendor/github.com/cilium/cilium/pkg/allocator/localkeys.go b/vendor/github.com/cilium/cilium/pkg/allocator/localkeys.go
new file mode 100644
index 000000000..48820d736
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/allocator/localkeys.go
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package allocator
+
+import (
+ "fmt"
+
+ "github.com/sirupsen/logrus"
+
+ "github.com/cilium/cilium/pkg/idpool"
+ "github.com/cilium/cilium/pkg/kvstore"
+ "github.com/cilium/cilium/pkg/lock"
+)
+
+type localKey struct {
+ val idpool.ID
+ key AllocatorKey
+ refcnt uint64
+
+ // verified is true when the key has been synced with the kvstore
+ verified bool
+}
+
+// localKeys is a map of keys in use locally. Keys can be used multiple times.
+// A refcnt is managed to know when a key is no longer in use
+type localKeys struct {
+ lock.RWMutex
+ keys map[string]*localKey
+ ids map[idpool.ID]*localKey
+}
+
+func newLocalKeys() *localKeys {
+ return &localKeys{
+ keys: map[string]*localKey{},
+ ids: map[idpool.ID]*localKey{},
+ }
+}
+
+// allocate creates an entry for key in localKeys if needed and increments the
+// refcnt. The value associated with the key must match the local cache or an
+// error is returned
+func (lk *localKeys) allocate(keyString string, key AllocatorKey, val idpool.ID) (idpool.ID, bool, error) {
+ lk.Lock()
+ defer lk.Unlock()
+
+ var firstUse bool
+
+ if k, ok := lk.keys[keyString]; ok {
+ if val != k.val {
+ return idpool.NoID, firstUse, fmt.Errorf("local key already allocated with different value (%s != %s)", val, k.val)
+ }
+
+ k.refcnt++
+ kvstore.Trace("Incremented local key refcnt", nil, logrus.Fields{fieldKey: keyString, fieldID: val, fieldRefCnt: k.refcnt})
+ return k.val, firstUse, nil
+ }
+
+ firstUse = true
+ k := &localKey{key: key, val: val, refcnt: 1}
+ lk.keys[keyString] = k
+ lk.ids[val] = k
+ kvstore.Trace("New local key", nil, logrus.Fields{fieldKey: keyString, fieldID: val, fieldRefCnt: 1})
+ return val, firstUse, nil
+}
+
+func (lk *localKeys) verify(key string) error {
+ lk.Lock()
+ defer lk.Unlock()
+
+ if k, ok := lk.keys[key]; ok {
+ k.verified = true
+ kvstore.Trace("Local key verified", nil, logrus.Fields{fieldKey: key})
+ return nil
+ }
+
+ return fmt.Errorf("key %s not found", key)
+}
+
+// lookupKey returns the idpool.ID if the key is present in the map of keys.
+// If it isn't present, returns idpool.NoID.
+func (lk *localKeys) lookupKey(key string) idpool.ID {
+ lk.RLock()
+ defer lk.RUnlock()
+
+ if k, ok := lk.keys[key]; ok {
+ return k.val
+ }
+
+ return idpool.NoID
+}
+
+// lookupID returns the key for a given ID or an empty string
+func (lk *localKeys) lookupID(id idpool.ID) AllocatorKey {
+ lk.RLock()
+ defer lk.RUnlock()
+
+ if k, ok := lk.ids[id]; ok {
+ return k.key
+ }
+
+ return nil
+}
+
+// use increments the refcnt of the key and returns its value
+func (lk *localKeys) use(key string) idpool.ID {
+ lk.Lock()
+ defer lk.Unlock()
+
+ if k, ok := lk.keys[key]; ok {
+ // unverified keys behave as if they do not exist
+ if !k.verified {
+ return idpool.NoID
+ }
+
+ k.refcnt++
+ kvstore.Trace("Incremented local key refcnt", nil, logrus.Fields{fieldKey: key, fieldID: k.val, fieldRefCnt: k.refcnt})
+ return k.val
+ }
+
+ return idpool.NoID
+}
+
+// release releases the refcnt of a key. It returns the ID associated with the
+// given key. When the last reference was released, the key is deleted and the
+// returned lastUse value is true.
+func (lk *localKeys) release(key string) (lastUse bool, id idpool.ID, err error) {
+ lk.Lock()
+ defer lk.Unlock()
+ if k, ok := lk.keys[key]; ok {
+ k.refcnt--
+ kvstore.Trace("Decremented local key refcnt", nil, logrus.Fields{fieldKey: key, fieldID: k.val, fieldRefCnt: k.refcnt})
+ if k.refcnt == 0 {
+ delete(lk.keys, key)
+ delete(lk.ids, k.val)
+ return true, k.val, nil
+ }
+
+ return false, k.val, nil
+ }
+
+ return false, idpool.NoID, fmt.Errorf("unable to find key in local cache")
+}
+
+func (lk *localKeys) getVerifiedIDs() map[idpool.ID]AllocatorKey {
+ ids := map[idpool.ID]AllocatorKey{}
+ lk.RLock()
+ for id, localKey := range lk.ids {
+ if localKey.verified {
+ ids[id] = localKey.key
+ }
+ }
+ lk.RUnlock()
+
+ return ids
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/allocator/logfields.go b/vendor/github.com/cilium/cilium/pkg/allocator/logfields.go
new file mode 100644
index 000000000..59df55405
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/allocator/logfields.go
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package allocator
+
+const (
+ fieldID = "id"
+ fieldKey = "key"
+ fieldRefCnt = "refcnt"
+)
diff --git a/vendor/github.com/cilium/cilium/pkg/annotation/k8s.go b/vendor/github.com/cilium/cilium/pkg/annotation/k8s.go
new file mode 100644
index 000000000..213b568c1
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/annotation/k8s.go
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package annotation
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+const (
+ // Prefix is the common prefix for all annotations
+ Prefix = "io.cilium"
+
+ // ConfigPrefix is the common prefix for configuration related annotations.
+ ConfigPrefix = "config.cilium.io"
+
+ // IngressPrefix is the common prefix for ingress related annotations.
+ IngressPrefix = "ingress.cilium.io"
+
+ // NetworkPrefix is the common prefix for network related annotations.
+ NetworkPrefix = "network.cilium.io"
+
+ // PolicyPrefix is the common prefix for policy related annotations.
+ PolicyPrefix = "policy.cilium.io"
+
+ // ServicePrefix is the common prefix for service related annotations.
+ ServicePrefix = "service.cilium.io"
+
+ // IPAMPrefix is the common prefix for IPAM related annotations.
+ IPAMPrefix = "ipam.cilium.io"
+
+ // PolicyName / PolicyNameAlias is an optional annotation to the NetworkPolicy
+ // resource which specifies the name of the policy node to which all
+ // rules should be applied to.
+ PolicyName = PolicyPrefix + "/name"
+ PolicyNameAlias = Prefix + ".name"
+
+ // V4CIDRName / V4CIDRNameAlias is the annotation name used to store the IPv4
+ // pod CIDR in the node's annotations.
+ V4CIDRName = NetworkPrefix + "/ipv4-pod-cidr"
+ V4CIDRNameAlias = Prefix + ".network.ipv4-pod-cidr"
+ // V6CIDRName / V6CIDRNameAlias is the annotation name used to store the IPv6
+ // pod CIDR in the node's annotations.
+ V6CIDRName = NetworkPrefix + "/ipv6-pod-cidr"
+ V6CIDRNameAlias = Prefix + ".network.ipv6-pod-cidr"
+
+ // V4HealthName / V4HealthNameAlias is the annotation name used to store the
+ // IPv4 address of the cilium-health endpoint in the node's annotations.
+ V4HealthName = NetworkPrefix + "/ipv4-health-ip"
+ V4HealthNameAlias = Prefix + ".network.ipv4-health-ip"
+ // V6HealthName / V6HealthNameAlias is the annotation name used to store the
+ // IPv6 address of the cilium-health endpoint in the node's annotations.
+ V6HealthName = NetworkPrefix + "/ipv6-health-ip"
+ V6HealthNameAlias = Prefix + ".network.ipv6-health-ip"
+
+ // V4IngressName / V4IngressNameAlias is the annotation name used to store
+ // the IPv4 address of the Ingress listener in the node's annotations.
+ V4IngressName = NetworkPrefix + "/ipv4-Ingress-ip"
+ V4IngressNameAlias = Prefix + ".network.ipv4-Ingress-ip"
+ // V6IngressName / V6IngressNameAlias is the annotation name used to store
+ // the IPv6 address of the Ingress listener in the node's annotations.
+ V6IngressName = NetworkPrefix + "/ipv6-Ingress-ip"
+ V6IngressNameAlias = Prefix + ".network.ipv6-Ingress-ip"
+
+ // CiliumHostIP / CiliumHostIPAlias is the annotation name used to store the
+ // IPv4 address of the cilium host interface in the node's annotations.
+ CiliumHostIP = NetworkPrefix + "/ipv4-cilium-host"
+ CiliumHostIPAlias = Prefix + ".network.ipv4-cilium-host"
+
+ // CiliumHostIPv6 / CiliumHostIPv6Alias is the annotation name used to store
+ // the IPv6 address of the cilium host interface in the node's annotation.
+ CiliumHostIPv6 = NetworkPrefix + "/ipv6-cilium-host"
+ CiliumHostIPv6Alias = Prefix + ".network.ipv6-cilium-host"
+
+ // CiliumEncryptionKey / CiliumEncryptionKeyAlias is the annotation name used to
+ // store the encryption key of the cilium host interface in the node's annotation.
+ CiliumEncryptionKey = NetworkPrefix + "/encryption-key"
+ CiliumEncryptionKeyAlias = Prefix + ".network.encryption-key"
+
+ // GlobalService / GlobalServiceAlias if set to true, marks a service to
+ // become a global service.
+ GlobalService = ServicePrefix + "/global"
+ GlobalServiceAlias = Prefix + "/global-service"
+
+ // SharedService / SharedServiceAlias if set to false, prevents a service
+ // from being shared, the default is true if GlobalService is set, otherwise
+ // false. Setting the annotation SharedService to false while setting
+ // GlobalService to true allows to expose remote endpoints without
+ // sharing local endpoints.
+ SharedService = ServicePrefix + "/shared"
+ SharedServiceAlias = Prefix + "/shared-service"
+
+ // ServiceAffinity / ServiceAffinityAlias annotations determines the
+ // preferred endpoint destination.
+ // Allowed values:
+ // - local
+ // preferred endpoints from local cluster if available
+ // - remote
+ // preferred endpoints from remote cluster if available
+ // - none (default)
+ // no preference. Default behavior if this annotation does not exist
+ ServiceAffinity = ServicePrefix + "/affinity"
+ ServiceAffinityAlias = Prefix + "/service-affinity"
+
+ // ProxyVisibility / ProxyVisibilityAlias is the annotation name used to
+ // indicate whether proxy visibility should be enabled for a given pod (i.e.,
+ // all traffic for the pod is redirected to the proxy for the given port /
+ // protocol in the annotation
+ ProxyVisibility = PolicyPrefix + "/proxy-visibility"
+ ProxyVisibilityAlias = Prefix + ".proxy-visibility"
+
+ // NoTrack / NoTrackAlias is the annotation name used to store the port and
+ // protocol that we should bypass kernel conntrack for a given pod. This
+ // applies for both TCP and UDP connection. Current use case is NodeLocalDNS.
+ NoTrack = PolicyPrefix + "/no-track-port"
+ NoTrackAlias = Prefix + ".no-track-port"
+
+ // WireguardPubKey / WireguardPubKeyAlias is the annotation name used to store
+ // the WireGuard public key in the CiliumNode CRD that we need to use to encrypt
+ // traffic to that node.
+ WireguardPubKey = NetworkPrefix + "/wg-pub-key"
+ WireguardPubKeyAlias = Prefix + ".network.wg-pub-key"
+
+ // BGPVRouterAnnoPrefix is the prefix used for all Virtual Router annotations
+ // Its just a prefix, because the ASN of the Router is part of the annotation itself
+ BGPVRouterAnnoPrefix = "cilium.io/bgp-virtual-router."
+
+ // IPAMPoolKey is the annotation name used to store the IPAM pool name from
+ // which workloads should allocate their IP from
+ IPAMPoolKey = IPAMPrefix + "/ip-pool"
+
+ // IPAMIPv4PoolKey is the annotation name used to store the IPAM IPv4 pool name from
+ // which workloads should allocate their IP from
+ IPAMIPv4PoolKey = IPAMPrefix + "/ipv4-pool"
+
+ // IPAMIPv6PoolKey is the annotation name used to store the IPAM IPv6 pool name from
+ // which workloads should allocate their IP from
+ IPAMIPv6PoolKey = IPAMPrefix + "/ipv6-pool"
+)
+
+// Get returns the annotation value associated with the given key, or any of
+// the additional aliases if not found.
+func Get(obj metav1.Object, key string, aliases ...string) (value string, ok bool) {
+ keys := append([]string{key}, aliases...)
+ for _, k := range keys {
+ if value, ok = obj.GetAnnotations()[k]; ok {
+ return value, ok
+ }
+ }
+
+ return "", false
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/api/apidisable.go b/vendor/github.com/cilium/cilium/pkg/api/apidisable.go
new file mode 100644
index 000000000..ef3ed5d42
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/api/apidisable.go
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package api
+
+import (
+ "net/http"
+
+ "github.com/go-openapi/runtime/middleware"
+ "github.com/sirupsen/logrus"
+
+ "github.com/cilium/cilium/pkg/logging/logfields"
+)
+
+type AdminDisableHandler struct {
+ name string
+}
+
+func NewAdminDisableHandler(name string) *AdminDisableHandler {
+ return &AdminDisableHandler{
+ name: name,
+ }
+}
+
+func (a *AdminDisableHandler) ServeHTTP(wr http.ResponseWriter, req *http.Request) {
+ wr.WriteHeader(http.StatusForbidden)
+ log.WithFields(logrus.Fields{
+ logfields.Endpoint: a.name,
+ }).Info("Denied API request on administratively disabled API endpoint")
+ _, _ = wr.Write([]byte("This API is administratively disabled. Contact your administrator for more details."))
+}
+
+// DisableAPIs configures the API middleware for all of the paths in the
+// provided PathSet such that those APIs will be administratively disabled at
+// runtime.
+func DisableAPIs(paths PathSet, addMiddleware func(method, path string, builder middleware.Builder)) {
+ for k, pm := range paths {
+ addMiddleware(pm.Method, pm.Path, func(_ http.Handler) http.Handler {
+ return NewAdminDisableHandler(k)
+ })
+ }
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/api/apierror.go b/vendor/github.com/cilium/cilium/pkg/api/apierror.go
new file mode 100644
index 000000000..83c44eeba
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/api/apierror.go
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package api
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/go-openapi/runtime"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// APIError is the error representation for the API.
+type APIError struct {
+ code int
+ msg string
+}
+
+// New creates an API error from the code, msg and extra arguments.
+func New(code int, msg string, args ...interface{}) *APIError {
+ if code <= 0 {
+ code = 500
+ }
+
+ if len(args) > 0 {
+ return &APIError{code: code, msg: fmt.Sprintf(msg, args...)}
+ }
+ return &APIError{code: code, msg: msg}
+}
+
+// Error creates a new API error from the code and error.
+func Error(code int, err error) *APIError {
+ if err == nil {
+ err = fmt.Errorf("Error pointer was nil")
+ }
+
+ return New(code, err.Error())
+}
+
+// Error returns the API error message.
+func (a *APIError) Error() string {
+ return a.msg
+}
+
+// GetModel returns model error.
+func (a *APIError) GetModel() *models.Error {
+ m := models.Error(a.msg)
+ return &m
+}
+
+// WriteResponse to the client.
+func (a *APIError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
+ rw.WriteHeader(a.code)
+ m := a.GetModel()
+ if err := producer.Produce(rw, m); err != nil {
+ panic(err)
+ }
+
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/api/apipanic.go b/vendor/github.com/cilium/cilium/pkg/api/apipanic.go
new file mode 100644
index 000000000..38481b26d
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/api/apipanic.go
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package api
+
+import (
+ "net/http"
+ "os"
+ "runtime/debug"
+
+ "github.com/sirupsen/logrus"
+
+ "github.com/cilium/cilium/pkg/logging"
+)
+
+// APIPanicHandler recovers from API panics and logs encountered panics
+type APIPanicHandler struct {
+ Next http.Handler
+}
+
+// ServeHTTP implements the http.Handler interface.
+// It recovers from panics of all next handlers and logs them
+func (h *APIPanicHandler) ServeHTTP(wr http.ResponseWriter, req *http.Request) {
+ defer func() {
+ if r := recover(); r != nil {
+ fields := logrus.Fields{
+ "panic_message": r,
+ "url": req.URL.String(),
+ "method": req.Method,
+ "client": req.RemoteAddr,
+ }
+ log.WithFields(fields).Warn("Cilium API handler panicked")
+ if logging.DefaultLogger.IsLevelEnabled(logrus.DebugLevel) {
+ os.Stdout.Write(debug.Stack())
+ }
+ wr.WriteHeader(http.StatusInternalServerError)
+ if _, err := wr.Write([]byte("Internal error occurred, check Cilium logs for details.")); err != nil {
+ log.WithError(err).Debug("Failed to write API response")
+ }
+ }
+ }()
+ h.Next.ServeHTTP(wr, req)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/api/config.go b/vendor/github.com/cilium/cilium/pkg/api/config.go
new file mode 100644
index 000000000..1087927ce
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/api/config.go
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package api
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/go-openapi/loads"
+ "github.com/go-openapi/spec"
+)
+
+var (
+ ErrUnknownWildcard = fmt.Errorf("Unsupported API wildcard")
+ ErrUnknownFlag = fmt.Errorf("Unknown API flag")
+)
+
+func pascalize(in string) string {
+ if len(in) < 2 {
+ return strings.ToUpper(in)
+ }
+ switch in {
+ case "bgp":
+ return "BGP"
+ case "id":
+ return "ID"
+ case "ip":
+ return "IP"
+ case "ipam":
+ return "IPAM"
+ case "lrp":
+ return "LRP"
+ }
+ return strings.ToUpper(in[0:1]) + strings.ToLower(in[1:])
+}
+
+func pathToFlagSuffix(path string) string {
+ result := ""
+ path = strings.TrimPrefix(path, "/")
+ for _, hunk := range strings.Split(path, "/") {
+ // TODO: Maybe we can just rename the /cgroup-dump-metadata API to /cgroups to avoid this loop?
+ for _, word := range strings.Split(hunk, "-") {
+ trimmed := strings.Trim(word, "{}")
+ result = result + pascalize(trimmed)
+ }
+ }
+
+ return result
+}
+
+func parseSpecPaths(paths *spec.Paths) PathSet {
+ results := make(PathSet)
+
+ for path, item := range paths.Paths {
+ suffix := pathToFlagSuffix(path)
+ ops := map[string]*spec.Operation{
+ "Delete": item.Delete,
+ "Get": item.Get,
+ "Patch": item.Patch,
+ "Post": item.Post,
+ "Put": item.Put,
+ }
+ for prefix, op := range ops {
+ if op != nil {
+ flag := prefix + suffix
+ results[flag] = Endpoint{
+ Method: strings.ToUpper(prefix),
+ Path: path,
+ Description: op.Description,
+ }
+ }
+ }
+ }
+
+ return PathSet(results)
+}
+
+func generateDeniedAPIEndpoints(allPaths PathSet, allowed []string) (PathSet, error) {
+ // default to "deny all", then allow specified APIs by flag
+ denied := allPaths
+
+ var wildcardPrefixes []string
+ for _, opt := range allowed {
+ switch strings.Index(opt, "*") {
+ case -1: // No wildcard
+ break
+ case len(opt) - 1: // suffix
+ prefix := strings.TrimSuffix(opt, "*")
+ if len(prefix) == 0 { // Full opt "*", ie allow all
+ return PathSet{}, nil
+ }
+ wildcardPrefixes = append(wildcardPrefixes, prefix)
+ continue
+ default:
+ return nil, fmt.Errorf("%w: %q", ErrUnknownWildcard, opt)
+ }
+ if _, ok := denied[opt]; ok {
+ delete(denied, opt)
+ } else {
+ return nil, fmt.Errorf("%w: %q", ErrUnknownFlag, opt)
+ }
+ }
+
+ for _, prefix := range wildcardPrefixes {
+ for f := range denied {
+ if strings.HasPrefix(f, prefix) {
+ delete(denied, f)
+ }
+ }
+ }
+ return denied, nil
+}
+
+// Endpoint is an API Endpoint for a parsed API specification.
+type Endpoint struct {
+ Method string
+ Path string
+ Description string
+}
+
+// PathSet is a set of APIs in the form of a map of canonical pascalized flag
+// name to MethodPath, for example:
+// "GetEndpointID": {"GET", "/endpoint/{id}"}
+type PathSet map[string]Endpoint
+
+func NewPathSet(spec *loads.Document) PathSet {
+ return parseSpecPaths(spec.Spec().Paths)
+}
+
+// AllowedFlagsToDeniedPaths parses the input API specification and the provided
+// commandline flags, and returns the PathSet that should be administratively
+// disabled using a subsequent call to DisableAPIs().
+func AllowedFlagsToDeniedPaths(spec *loads.Document, allowed []string) (PathSet, error) {
+ paths := parseSpecPaths(spec.Spec().Paths)
+ return generateDeniedAPIEndpoints(paths, allowed)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/api/const.go b/vendor/github.com/cilium/cilium/pkg/api/const.go
new file mode 100644
index 000000000..356c6ddb3
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/api/const.go
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package api
+
+import (
+ "os"
+ "time"
+)
+
+const (
+ // CiliumGroupName is the cilium's unix group name.
+ CiliumGroupName = "cilium"
+ // SocketFileMode is the default file mode for the sockets.
+ SocketFileMode os.FileMode = 0660
+ // ClientTimeout specifies timeout to be used by clients
+ ClientTimeout = 90 * time.Second
+)
diff --git a/vendor/github.com/cilium/cilium/pkg/api/doc.go b/vendor/github.com/cilium/cilium/pkg/api/doc.go
new file mode 100644
index 000000000..83b4b5bfb
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/api/doc.go
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Package api provides the Cilium useful helpers for the external API
+package api
diff --git a/vendor/github.com/cilium/cilium/pkg/api/socket.go b/vendor/github.com/cilium/cilium/pkg/api/socket.go
new file mode 100644
index 000000000..6af237a1e
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/api/socket.go
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package api
+
+import (
+ "fmt"
+ "os"
+ "os/user"
+ "strconv"
+
+ "github.com/sirupsen/logrus"
+
+ "github.com/cilium/cilium/pkg/logging"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+)
+
+var log = logging.DefaultLogger.WithField(logfields.LogSubsys, "api")
+
+// getGroupIDByName returns the group ID for the given grpName.
+func getGroupIDByName(grpName string) (int, error) {
+ group, err := user.LookupGroup(grpName)
+ if err != nil {
+ return -1, err
+ }
+ return strconv.Atoi(group.Gid)
+}
+
+// SetDefaultPermissions sets the given socket's group to `CiliumGroupName` and
+// mode to `SocketFileMode`.
+func SetDefaultPermissions(socketPath string) error {
+ gid, err := getGroupIDByName(CiliumGroupName)
+ if err != nil {
+ log.WithError(err).WithFields(logrus.Fields{
+ logfields.Path: socketPath,
+ "group": CiliumGroupName,
+ }).Debug("Group not found")
+ } else {
+ if err := os.Chown(socketPath, 0, gid); err != nil {
+ return fmt.Errorf("failed while setting up %s's group ID"+
+ " in %q: %s", CiliumGroupName, socketPath, err)
+ }
+ }
+ if err := os.Chmod(socketPath, SocketFileMode); err != nil {
+ return fmt.Errorf("failed while setting up file permissions in %q: %w",
+ socketPath, err)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/aws/eni/types/doc.go b/vendor/github.com/cilium/cilium/pkg/aws/eni/types/doc.go
new file mode 100644
index 000000000..0b0cb83f7
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/aws/eni/types/doc.go
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// +k8s:deepcopy-gen=package
+// +deepequal-gen=package
+
+package types
diff --git a/vendor/github.com/cilium/cilium/pkg/aws/eni/types/types.go b/vendor/github.com/cilium/cilium/pkg/aws/eni/types/types.go
new file mode 100644
index 000000000..aec96efcb
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/aws/eni/types/types.go
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package types
+
+import (
+ "github.com/cilium/cilium/pkg/ipam/types"
+)
+
+// ENISpec is the ENI specification of a node. This specification is considered
+// by the cilium-operator to act as an IPAM operator and makes ENI IPs available
+// via the IPAMSpec section.
+//
+// The ENI specification can either be provided explicitly by the user or the
+// cilium agent running on the node can be instructed to create the CiliumNode
+// custom resource along with an ENI specification when the node registers
+// itself to the Kubernetes cluster.
+type ENISpec struct {
+ // InstanceID is the AWS InstanceId of the node. The InstanceID is used
+ // to retrieve AWS metadata for the node.
+ //
+ // OBSOLETE: This field is obsolete, please use Spec.InstanceID
+ //
+ // +kubebuilder:validation:Optional
+ InstanceID string `json:"instance-id,omitempty"`
+
+ // InstanceType is the AWS EC2 instance type, e.g. "m5.large"
+ //
+ // +kubebuilder:validation:Optional
+ InstanceType string `json:"instance-type,omitempty"`
+
+ // MinAllocate is the minimum number of IPs that must be allocated when
+ // the node is first bootstrapped. It defines the minimum base socket
+ // of addresses that must be available. After reaching this watermark,
+ // the PreAllocate and MaxAboveWatermark logic takes over to continue
+ // allocating IPs.
+ //
+ // OBSOLETE: This field is obsolete, please use Spec.IPAM.MinAllocate
+ //
+ // +kubebuilder:validation:Minimum=0
+ // +kubebuilder:validation:Optional
+ MinAllocate int `json:"min-allocate,omitempty"`
+
+ // PreAllocate defines the number of IP addresses that must be
+ // available for allocation in the IPAMspec. It defines the buffer of
+ // addresses available immediately without requiring cilium-operator to
+ // get involved.
+ //
+ // OBSOLETE: This field is obsolete, please use Spec.IPAM.PreAllocate
+ //
+ // +kubebuilder:validation:Minimum=0
+ // +kubebuilder:validation:Optional
+ PreAllocate int `json:"pre-allocate,omitempty"`
+
+ // MaxAboveWatermark is the maximum number of addresses to allocate
+ // beyond the addresses needed to reach the PreAllocate watermark.
+ // Going above the watermark can help reduce the number of API calls to
+ // allocate IPs, e.g. when a new ENI is allocated, as many secondary
+ // IPs as possible are allocated. Limiting the amount can help reduce
+ // waste of IPs.
+ //
+ // OBSOLETE: This field is obsolete, please use Spec.IPAM.MaxAboveWatermark
+ //
+ // +kubebuilder:validation:Minimum=0
+ // +kubebuilder:validation:Optional
+ MaxAboveWatermark int `json:"max-above-watermark,omitempty"`
+
+ // FirstInterfaceIndex is the index of the first ENI to use for IP
+ // allocation, e.g. if the node has eth0, eth1, eth2 and
+ // FirstInterfaceIndex is set to 1, then only eth1 and eth2 will be
+ // used for IP allocation, eth0 will be ignored for PodIP allocation.
+ //
+ // +kubebuilder:validation:Minimum=0
+ // +kubebuilder:validation:Optional
+ FirstInterfaceIndex *int `json:"first-interface-index,omitempty"`
+
+ // SecurityGroups is the list of security groups to attach to any ENI
+ // that is created and attached to the instance.
+ //
+ // +kubebuilder:validation:Optional
+ SecurityGroups []string `json:"security-groups,omitempty"`
+
+	// SecurityGroupTags is the list of tags to use when evaluating what
+ // AWS security groups to use for the ENI.
+ //
+ // +kubebuilder:validation:Optional
+ SecurityGroupTags map[string]string `json:"security-group-tags,omitempty"`
+
+ // SubnetIDs is the list of subnet ids to use when evaluating what AWS
+ // subnets to use for ENI and IP allocation.
+ //
+ // +kubebuilder:validation:Optional
+ SubnetIDs []string `json:"subnet-ids,omitempty"`
+
+ // SubnetTags is the list of tags to use when evaluating what AWS
+ // subnets to use for ENI and IP allocation.
+ //
+ // +kubebuilder:validation:Optional
+ SubnetTags map[string]string `json:"subnet-tags,omitempty"`
+
+ // NodeSubnetID is the subnet of the primary ENI the instance was brought up
+ // with. It is used as a sensible default subnet to create ENIs in.
+ //
+ // +kubebuilder:validation:Optional
+ NodeSubnetID string `json:"node-subnet-id,omitempty"`
+
+ // VpcID is the VPC ID to use when allocating ENIs.
+ //
+ // +kubebuilder:validation:Optional
+ VpcID string `json:"vpc-id,omitempty"`
+
+ // AvailabilityZone is the availability zone to use when allocating
+ // ENIs.
+ //
+ // +kubebuilder:validation:Optional
+ AvailabilityZone string `json:"availability-zone,omitempty"`
+
+ // ExcludeInterfaceTags is the list of tags to use when excluding ENIs for
+ // Cilium IP allocation. Any interface matching this set of tags will not
+ // be managed by Cilium.
+ //
+ // +kubebuilder:validation:Optional
+ ExcludeInterfaceTags map[string]string `json:"exclude-interface-tags,omitempty"`
+
+ // DeleteOnTermination defines that the ENI should be deleted when the
+ // associated instance is terminated. If the parameter is not set the
+ // default behavior is to delete the ENI on instance termination.
+ //
+ // +kubebuilder:validation:Optional
+ DeleteOnTermination *bool `json:"delete-on-termination,omitempty"`
+
+ // UsePrimaryAddress determines whether an ENI's primary address
+ // should be available for allocations on the node
+ //
+ // +kubebuilder:validation:Optional
+ UsePrimaryAddress *bool `json:"use-primary-address,omitempty"`
+
+ // DisablePrefixDelegation determines whether ENI prefix delegation should be
+ // disabled on this node.
+ //
+ // +kubebuilder:validation:Optional
+ DisablePrefixDelegation *bool `json:"disable-prefix-delegation,omitempty"`
+}
+
+// ENI represents an AWS Elastic Network Interface
+//
+// More details:
+// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html
+type ENI struct {
+ // ID is the ENI ID
+ //
+ // +optional
+ ID string `json:"id,omitempty"`
+
+ // IP is the primary IP of the ENI
+ //
+ // +optional
+ IP string `json:"ip,omitempty"`
+
+ // MAC is the mac address of the ENI
+ //
+ // +optional
+ MAC string `json:"mac,omitempty"`
+
+ // AvailabilityZone is the availability zone of the ENI
+ //
+ // +optional
+ AvailabilityZone string `json:"availability-zone,omitempty"`
+
+ // Description is the description field of the ENI
+ //
+ // +optional
+ Description string `json:"description,omitempty"`
+
+	// Number is the interface index; it is used in combination with
+ // FirstInterfaceIndex
+ //
+ // +optional
+ Number int `json:"number,omitempty"`
+
+ // Subnet is the subnet the ENI is associated with
+ //
+ // +optional
+ Subnet AwsSubnet `json:"subnet,omitempty"`
+
+ // VPC is the VPC information to which the ENI is attached to
+ //
+ // +optional
+ VPC AwsVPC `json:"vpc,omitempty"`
+
+ // Addresses is the list of all secondary IPs associated with the ENI
+ //
+ // +optional
+ Addresses []string `json:"addresses,omitempty"`
+
+ // Prefixes is the list of all /28 prefixes associated with the ENI
+ //
+ // +optional
+ Prefixes []string `json:"prefixes,omitempty"`
+
+ // SecurityGroups are the security groups associated with the ENI
+ SecurityGroups []string `json:"security-groups,omitempty"`
+
+ // Tags is the set of tags of the ENI. Used to detect ENIs which should
+ // not be managed by Cilium
+ //
+ // +optional
+ Tags map[string]string `json:"tags,omitempty"`
+}
+
+// InterfaceID returns the identifier of the interface
+func (e *ENI) InterfaceID() string {
+ return e.ID
+}
+
+// ForeachAddress iterates over all addresses and calls fn
+func (e *ENI) ForeachAddress(id string, fn types.AddressIterator) error {
+ for _, address := range e.Addresses {
+ if err := fn(id, e.ID, address, "", address); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// IsExcludedBySpec returns true if the ENI is excluded by the provided spec and
+// therefore should not be managed by Cilium.
+func (e *ENI) IsExcludedBySpec(spec ENISpec) bool {
+ if spec.FirstInterfaceIndex != nil && e.Number < *spec.FirstInterfaceIndex {
+ return true
+ }
+
+ if len(spec.ExcludeInterfaceTags) > 0 {
+ if types.Tags(e.Tags).Match(spec.ExcludeInterfaceTags) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// ENIStatus is the status of ENI addressing of the node
+type ENIStatus struct {
+ // ENIs is the list of ENIs on the node
+ //
+ // +optional
+ ENIs map[string]ENI `json:"enis,omitempty"`
+}
+
+// AwsSubnet stores information regarding an AWS subnet
+type AwsSubnet struct {
+ // ID is the ID of the subnet
+ ID string `json:"id,omitempty"`
+
+ // CIDR is the CIDR range associated with the subnet
+ CIDR string `json:"cidr,omitempty"`
+}
+
+// AwsVPC stores information regarding an AWS VPC
+type AwsVPC struct {
+	// ID is the ID of a VPC
+ ID string `json:"id,omitempty"`
+
+ // PrimaryCIDR is the primary CIDR of the VPC
+ PrimaryCIDR string `json:"primary-cidr,omitempty"`
+
+ // CIDRs is the list of CIDR ranges associated with the VPC
+ CIDRs []string `json:"cidrs,omitempty"`
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/aws/eni/types/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/aws/eni/types/zz_generated.deepcopy.go
new file mode 100644
index 000000000..a5011d397
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/aws/eni/types/zz_generated.deepcopy.go
@@ -0,0 +1,176 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package types
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AwsSubnet) DeepCopyInto(out *AwsSubnet) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AwsSubnet.
+func (in *AwsSubnet) DeepCopy() *AwsSubnet {
+ if in == nil {
+ return nil
+ }
+ out := new(AwsSubnet)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AwsVPC) DeepCopyInto(out *AwsVPC) {
+ *out = *in
+ if in.CIDRs != nil {
+ in, out := &in.CIDRs, &out.CIDRs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AwsVPC.
+func (in *AwsVPC) DeepCopy() *AwsVPC {
+ if in == nil {
+ return nil
+ }
+ out := new(AwsVPC)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ENI) DeepCopyInto(out *ENI) {
+ *out = *in
+ out.Subnet = in.Subnet
+ in.VPC.DeepCopyInto(&out.VPC)
+ if in.Addresses != nil {
+ in, out := &in.Addresses, &out.Addresses
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Prefixes != nil {
+ in, out := &in.Prefixes, &out.Prefixes
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.SecurityGroups != nil {
+ in, out := &in.SecurityGroups, &out.SecurityGroups
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Tags != nil {
+ in, out := &in.Tags, &out.Tags
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ENI.
+func (in *ENI) DeepCopy() *ENI {
+ if in == nil {
+ return nil
+ }
+ out := new(ENI)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ENISpec) DeepCopyInto(out *ENISpec) {
+ *out = *in
+ if in.FirstInterfaceIndex != nil {
+ in, out := &in.FirstInterfaceIndex, &out.FirstInterfaceIndex
+ *out = new(int)
+ **out = **in
+ }
+ if in.SecurityGroups != nil {
+ in, out := &in.SecurityGroups, &out.SecurityGroups
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.SecurityGroupTags != nil {
+ in, out := &in.SecurityGroupTags, &out.SecurityGroupTags
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.SubnetIDs != nil {
+ in, out := &in.SubnetIDs, &out.SubnetIDs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.SubnetTags != nil {
+ in, out := &in.SubnetTags, &out.SubnetTags
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.ExcludeInterfaceTags != nil {
+ in, out := &in.ExcludeInterfaceTags, &out.ExcludeInterfaceTags
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.DeleteOnTermination != nil {
+ in, out := &in.DeleteOnTermination, &out.DeleteOnTermination
+ *out = new(bool)
+ **out = **in
+ }
+ if in.UsePrimaryAddress != nil {
+ in, out := &in.UsePrimaryAddress, &out.UsePrimaryAddress
+ *out = new(bool)
+ **out = **in
+ }
+ if in.DisablePrefixDelegation != nil {
+ in, out := &in.DisablePrefixDelegation, &out.DisablePrefixDelegation
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ENISpec.
+func (in *ENISpec) DeepCopy() *ENISpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ENISpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ENIStatus) DeepCopyInto(out *ENIStatus) {
+ *out = *in
+ if in.ENIs != nil {
+ in, out := &in.ENIs, &out.ENIs
+ *out = make(map[string]ENI, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ENIStatus.
+func (in *ENIStatus) DeepCopy() *ENIStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ENIStatus)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/aws/eni/types/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/aws/eni/types/zz_generated.deepequal.go
new file mode 100644
index 000000000..4c1c93fc0
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/aws/eni/types/zz_generated.deepequal.go
@@ -0,0 +1,361 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by deepequal-gen. DO NOT EDIT.
+
+package types
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *AwsSubnet) DeepEqual(other *AwsSubnet) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.ID != other.ID {
+ return false
+ }
+ if in.CIDR != other.CIDR {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *AwsVPC) DeepEqual(other *AwsVPC) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.ID != other.ID {
+ return false
+ }
+ if in.PrimaryCIDR != other.PrimaryCIDR {
+ return false
+ }
+ if ((in.CIDRs != nil) && (other.CIDRs != nil)) || ((in.CIDRs == nil) != (other.CIDRs == nil)) {
+ in, other := &in.CIDRs, &other.CIDRs
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if inElement != (*other)[i] {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *ENI) DeepEqual(other *ENI) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.ID != other.ID {
+ return false
+ }
+ if in.IP != other.IP {
+ return false
+ }
+ if in.MAC != other.MAC {
+ return false
+ }
+ if in.AvailabilityZone != other.AvailabilityZone {
+ return false
+ }
+ if in.Description != other.Description {
+ return false
+ }
+ if in.Number != other.Number {
+ return false
+ }
+ if in.Subnet != other.Subnet {
+ return false
+ }
+
+ if !in.VPC.DeepEqual(&other.VPC) {
+ return false
+ }
+
+ if ((in.Addresses != nil) && (other.Addresses != nil)) || ((in.Addresses == nil) != (other.Addresses == nil)) {
+ in, other := &in.Addresses, &other.Addresses
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if inElement != (*other)[i] {
+ return false
+ }
+ }
+ }
+ }
+
+ if ((in.Prefixes != nil) && (other.Prefixes != nil)) || ((in.Prefixes == nil) != (other.Prefixes == nil)) {
+ in, other := &in.Prefixes, &other.Prefixes
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if inElement != (*other)[i] {
+ return false
+ }
+ }
+ }
+ }
+
+ if ((in.SecurityGroups != nil) && (other.SecurityGroups != nil)) || ((in.SecurityGroups == nil) != (other.SecurityGroups == nil)) {
+ in, other := &in.SecurityGroups, &other.SecurityGroups
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if inElement != (*other)[i] {
+ return false
+ }
+ }
+ }
+ }
+
+ if ((in.Tags != nil) && (other.Tags != nil)) || ((in.Tags == nil) != (other.Tags == nil)) {
+ in, other := &in.Tags, &other.Tags
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for key, inValue := range *in {
+ if otherValue, present := (*other)[key]; !present {
+ return false
+ } else {
+ if inValue != otherValue {
+ return false
+ }
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *ENISpec) DeepEqual(other *ENISpec) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.InstanceID != other.InstanceID {
+ return false
+ }
+ if in.InstanceType != other.InstanceType {
+ return false
+ }
+ if in.MinAllocate != other.MinAllocate {
+ return false
+ }
+ if in.PreAllocate != other.PreAllocate {
+ return false
+ }
+ if in.MaxAboveWatermark != other.MaxAboveWatermark {
+ return false
+ }
+ if (in.FirstInterfaceIndex == nil) != (other.FirstInterfaceIndex == nil) {
+ return false
+ } else if in.FirstInterfaceIndex != nil {
+ if *in.FirstInterfaceIndex != *other.FirstInterfaceIndex {
+ return false
+ }
+ }
+
+ if ((in.SecurityGroups != nil) && (other.SecurityGroups != nil)) || ((in.SecurityGroups == nil) != (other.SecurityGroups == nil)) {
+ in, other := &in.SecurityGroups, &other.SecurityGroups
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if inElement != (*other)[i] {
+ return false
+ }
+ }
+ }
+ }
+
+ if ((in.SecurityGroupTags != nil) && (other.SecurityGroupTags != nil)) || ((in.SecurityGroupTags == nil) != (other.SecurityGroupTags == nil)) {
+ in, other := &in.SecurityGroupTags, &other.SecurityGroupTags
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for key, inValue := range *in {
+ if otherValue, present := (*other)[key]; !present {
+ return false
+ } else {
+ if inValue != otherValue {
+ return false
+ }
+ }
+ }
+ }
+ }
+
+ if ((in.SubnetIDs != nil) && (other.SubnetIDs != nil)) || ((in.SubnetIDs == nil) != (other.SubnetIDs == nil)) {
+ in, other := &in.SubnetIDs, &other.SubnetIDs
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if inElement != (*other)[i] {
+ return false
+ }
+ }
+ }
+ }
+
+ if ((in.SubnetTags != nil) && (other.SubnetTags != nil)) || ((in.SubnetTags == nil) != (other.SubnetTags == nil)) {
+ in, other := &in.SubnetTags, &other.SubnetTags
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for key, inValue := range *in {
+ if otherValue, present := (*other)[key]; !present {
+ return false
+ } else {
+ if inValue != otherValue {
+ return false
+ }
+ }
+ }
+ }
+ }
+
+ if in.NodeSubnetID != other.NodeSubnetID {
+ return false
+ }
+ if in.VpcID != other.VpcID {
+ return false
+ }
+ if in.AvailabilityZone != other.AvailabilityZone {
+ return false
+ }
+ if ((in.ExcludeInterfaceTags != nil) && (other.ExcludeInterfaceTags != nil)) || ((in.ExcludeInterfaceTags == nil) != (other.ExcludeInterfaceTags == nil)) {
+ in, other := &in.ExcludeInterfaceTags, &other.ExcludeInterfaceTags
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for key, inValue := range *in {
+ if otherValue, present := (*other)[key]; !present {
+ return false
+ } else {
+ if inValue != otherValue {
+ return false
+ }
+ }
+ }
+ }
+ }
+
+ if (in.DeleteOnTermination == nil) != (other.DeleteOnTermination == nil) {
+ return false
+ } else if in.DeleteOnTermination != nil {
+ if *in.DeleteOnTermination != *other.DeleteOnTermination {
+ return false
+ }
+ }
+
+ if (in.UsePrimaryAddress == nil) != (other.UsePrimaryAddress == nil) {
+ return false
+ } else if in.UsePrimaryAddress != nil {
+ if *in.UsePrimaryAddress != *other.UsePrimaryAddress {
+ return false
+ }
+ }
+
+ if (in.DisablePrefixDelegation == nil) != (other.DisablePrefixDelegation == nil) {
+ return false
+ } else if in.DisablePrefixDelegation != nil {
+ if *in.DisablePrefixDelegation != *other.DisablePrefixDelegation {
+ return false
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *ENIStatus) DeepEqual(other *ENIStatus) bool {
+ if other == nil {
+ return false
+ }
+
+ if ((in.ENIs != nil) && (other.ENIs != nil)) || ((in.ENIs == nil) != (other.ENIs == nil)) {
+ in, other := &in.ENIs, &other.ENIs
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for key, inValue := range *in {
+ if otherValue, present := (*other)[key]; !present {
+ return false
+ } else {
+ if !inValue.DeepEqual(&otherValue) {
+ return false
+ }
+ }
+ }
+ }
+ }
+
+ return true
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/azure/types/doc.go b/vendor/github.com/cilium/cilium/pkg/azure/types/doc.go
new file mode 100644
index 000000000..6d56b146b
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/azure/types/doc.go
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// +deepequal-gen=package
+
+// +groupName=azure
+package types
diff --git a/vendor/github.com/cilium/cilium/pkg/azure/types/types.go b/vendor/github.com/cilium/cilium/pkg/azure/types/types.go
new file mode 100644
index 000000000..0d42674e8
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/azure/types/types.go
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package types
+
+import (
+ "strings"
+
+ "github.com/cilium/cilium/pkg/ipam/types"
+)
+
+const (
+ // ProviderPrefix is the prefix used to indicate that a k8s ProviderID
+ // represents an Azure resource
+ ProviderPrefix = "azure://"
+
+ // InterfaceAddressLimit is the maximum number of addresses on an interface
+ //
+ //
+ // For more information:
+ // https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/azure-subscription-service-limits?toc=%2fazure%2fvirtual-network%2ftoc.json#networking-limits
+ InterfaceAddressLimit = 256
+
+ // StateSucceeded is the address state for a successfully provisioned address
+ StateSucceeded = "succeeded"
+)
+
+// AzureSpec is the Azure specification of a node running via the Azure IPAM
+//
+// The Azure specification can either be provided explicitly by the user or the
+// cilium agent running on the node can be instructed to create the CiliumNode
+// custom resource along with an Azure specification when the node registers
+// itself to the Kubernetes cluster.
+// This struct is embedded into v2.CiliumNode
+//
+// +k8s:deepcopy-gen=true
+type AzureSpec struct {
+ // InterfaceName is the name of the interface the cilium-operator
+ // will use to allocate all the IPs on
+ //
+ // +kubebuilder:validation:Optional
+ InterfaceName string `json:"interface-name,omitempty"`
+}
+
+// AzureStatus is the status of Azure addressing of the node.
+// This struct is embedded into v2.CiliumNode
+//
+// +k8s:deepcopy-gen=true
+type AzureStatus struct {
+ // Interfaces is the list of interfaces on the node
+ //
+ // +optional
+ Interfaces []AzureInterface `json:"interfaces,omitempty"`
+}
+
+// AzureAddress is an IP address assigned to an AzureInterface
+type AzureAddress struct {
+ // IP is the ip address of the address
+ IP string `json:"ip,omitempty"`
+
+ // Subnet is the subnet the address belongs to
+ Subnet string `json:"subnet,omitempty"`
+
+ // State is the provisioning state of the address
+ State string `json:"state,omitempty"`
+}
+
+// AzureInterface represents an Azure Interface
+//
+// +k8s:deepcopy-gen=true
+type AzureInterface struct {
+ // ID is the identifier
+ //
+ // +optional
+ ID string `json:"id,omitempty"`
+
+ // Name is the name of the interface
+ //
+ // +optional
+ Name string `json:"name,omitempty"`
+
+ // MAC is the mac address
+ //
+ // +optional
+ MAC string `json:"mac,omitempty"`
+
+ // State is the provisioning state
+ //
+ // +optional
+ State string `json:"state,omitempty"`
+
+ // Addresses is the list of all IPs associated with the interface,
+ // including all secondary addresses
+ //
+ // +optional
+ Addresses []AzureAddress `json:"addresses,omitempty"`
+
+ // SecurityGroup is the security group associated with the interface
+ SecurityGroup string `json:"security-group,omitempty"`
+
+ // GatewayIP is the interface's subnet's default route
+ //
+ // OBSOLETE: This field is obsolete, please use Gateway field instead.
+ //
+ // +optional
+ GatewayIP string `json:"GatewayIP"`
+
+ // Gateway is the interface's subnet's default route
+ //
+ // +optional
+ Gateway string `json:"gateway"`
+
+ // CIDR is the range that the interface belongs to.
+ //
+ // +optional
+ CIDR string `json:"cidr,omitempty"`
+
+ // vmssName is the name of the virtual machine scale set. This field is
+ // set by extractIDs()
+ vmssName string `json:"-"`
+
+ // vmID is the ID of the virtual machine
+ vmID string `json:"-"`
+
+ // resourceGroup is the resource group the interface belongs to
+ resourceGroup string `json:"-"`
+}
+
+// SetID sets the Azure interface ID, as well as extracting other fields from
+// the ID itself.
+func (a *AzureInterface) SetID(id string) {
+ a.ID = id
+ a.extractIDs()
+}
+
+// InterfaceID returns the identifier of the interface
+func (a *AzureInterface) InterfaceID() string {
+ return a.ID
+}
+
+func (a *AzureInterface) extractIDs() {
+ switch {
+ // Interface from a VMSS instance:
+ // //subscriptions/xxx/resourceGroups/yyy/providers/Microsoft.Compute/virtualMachineScaleSets/ssss/virtualMachines/vvv/networkInterfaces/iii
+ case strings.Contains(a.ID, "virtualMachineScaleSets"):
+ segs := strings.Split(a.ID, "/")
+ if len(segs) >= 5 {
+ a.resourceGroup = segs[4]
+ }
+ if len(segs) >= 9 {
+ a.vmssName = segs[8]
+ }
+ if len(segs) >= 11 {
+ a.vmID = segs[10]
+ }
+ // Interface from a standalone instance:
+ // //subscriptions/xxx/resourceGroups/yyy/providers/Microsoft.Network/networkInterfaces/iii
+ case strings.Contains(a.ID, "/Microsoft.Network/"):
+ segs := strings.Split(a.ID, "/")
+ if len(segs) >= 5 {
+ a.resourceGroup = segs[4]
+ }
+ }
+}
+
+// GetResourceGroup returns the resource group the interface belongs to
+func (a *AzureInterface) GetResourceGroup() string {
+ return a.resourceGroup
+}
+
+// GetVMScaleSetName returns the VM scale set name the interface belongs to
+func (a *AzureInterface) GetVMScaleSetName() string {
+ return a.vmssName
+}
+
+// GetVMID returns the VM ID the interface belongs to
+func (a *AzureInterface) GetVMID() string {
+ return a.vmID
+}
+
+// ForeachAddress iterates over all addresses and calls fn
+func (a *AzureInterface) ForeachAddress(id string, fn types.AddressIterator) error {
+ for _, address := range a.Addresses {
+ if err := fn(id, a.ID, address.IP, address.Subnet, address); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/azure/types/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/azure/types/zz_generated.deepcopy.go
new file mode 100644
index 000000000..ad51b4527
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/azure/types/zz_generated.deepcopy.go
@@ -0,0 +1,69 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package types
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzureInterface) DeepCopyInto(out *AzureInterface) {
+ *out = *in
+ if in.Addresses != nil {
+ in, out := &in.Addresses, &out.Addresses
+ *out = make([]AzureAddress, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureInterface.
+func (in *AzureInterface) DeepCopy() *AzureInterface {
+ if in == nil {
+ return nil
+ }
+ out := new(AzureInterface)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzureSpec) DeepCopyInto(out *AzureSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureSpec.
+func (in *AzureSpec) DeepCopy() *AzureSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(AzureSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzureStatus) DeepCopyInto(out *AzureStatus) {
+ *out = *in
+ if in.Interfaces != nil {
+ in, out := &in.Interfaces, &out.Interfaces
+ *out = make([]AzureInterface, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureStatus.
+func (in *AzureStatus) DeepCopy() *AzureStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(AzureStatus)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/azure/types/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/azure/types/zz_generated.deepequal.go
new file mode 100644
index 000000000..8761f1949
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/azure/types/zz_generated.deepequal.go
@@ -0,0 +1,131 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by deepequal-gen. DO NOT EDIT.
+
+package types
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *AzureAddress) DeepEqual(other *AzureAddress) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.IP != other.IP {
+ return false
+ }
+ if in.Subnet != other.Subnet {
+ return false
+ }
+ if in.State != other.State {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *AzureInterface) DeepEqual(other *AzureInterface) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.ID != other.ID {
+ return false
+ }
+ if in.Name != other.Name {
+ return false
+ }
+ if in.MAC != other.MAC {
+ return false
+ }
+ if in.State != other.State {
+ return false
+ }
+ if ((in.Addresses != nil) && (other.Addresses != nil)) || ((in.Addresses == nil) != (other.Addresses == nil)) {
+ in, other := &in.Addresses, &other.Addresses
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ if in.SecurityGroup != other.SecurityGroup {
+ return false
+ }
+ if in.GatewayIP != other.GatewayIP {
+ return false
+ }
+ if in.Gateway != other.Gateway {
+ return false
+ }
+ if in.CIDR != other.CIDR {
+ return false
+ }
+ if in.vmssName != other.vmssName {
+ return false
+ }
+ if in.vmID != other.vmID {
+ return false
+ }
+ if in.resourceGroup != other.resourceGroup {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *AzureSpec) DeepEqual(other *AzureSpec) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.InterfaceName != other.InterfaceName {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *AzureStatus) DeepEqual(other *AzureStatus) bool {
+ if other == nil {
+ return false
+ }
+
+ if ((in.Interfaces != nil) && (other.Interfaces != nil)) || ((in.Interfaces == nil) != (other.Interfaces == nil)) {
+ in, other := &in.Interfaces, &other.Interfaces
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/backoff/backoff.go b/vendor/github.com/cilium/cilium/pkg/backoff/backoff.go
new file mode 100644
index 000000000..4180707b3
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/backoff/backoff.go
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package backoff
+
+import (
+ "context"
+ "fmt"
+ "math"
+ "time"
+
+ "github.com/google/uuid"
+ "github.com/sirupsen/logrus"
+
+ "github.com/cilium/cilium/pkg/logging"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+ "github.com/cilium/cilium/pkg/rand"
+)
+
+var (
+ log = logging.DefaultLogger.WithField(logfields.LogSubsys, "backoff")
+
+ randGen = rand.NewSafeRand(time.Now().UnixNano())
+)
+
+// NodeManager is the interface required to implement cluster size dependent
+// intervals
+type NodeManager interface {
+ ClusterSizeDependantInterval(baseInterval time.Duration) time.Duration
+}
+
+// nodeManager is a wrapper to enable using a plain function as NodeManager to implement
+// cluster size dependent intervals
+type nodeManager struct {
+ clusterSizeDependantInterval func(baseInterval time.Duration) time.Duration
+}
+
+// NewNodeManager returns a new NodeManager implementing cluster size dependent intervals
+// based on the given function. If the function is nil, then no tuning is performed.
+func NewNodeManager(clusterSizeDependantInterval func(baseInterval time.Duration) time.Duration) NodeManager {
+ return &nodeManager{clusterSizeDependantInterval: clusterSizeDependantInterval}
+}
+
+func (n *nodeManager) ClusterSizeDependantInterval(baseInterval time.Duration) time.Duration {
+ if n.clusterSizeDependantInterval == nil {
+ return baseInterval
+ }
+
+ return n.clusterSizeDependantInterval(baseInterval)
+}
+
+// Exponential implements an exponential backoff
+type Exponential struct {
+ // Min is the minimal backoff time, if unspecified, 1 second will be
+ // used
+ Min time.Duration
+
+ // Max is the maximum backoff time, if unspecified, no maximum time is
+ // applied
+ Max time.Duration
+
+ // Factor is the factor the backoff time grows exponentially, if
+ // unspecified, a factor of 2.0 will be used
+ Factor float64
+
+ // Jitter, when enabled, adds random jitter to the interval
+ Jitter bool
+
+ // NodeManager enables the use of cluster size dependent backoff
+ // intervals, i.e. the larger the cluster, the longer the backoff
+ // interval
+ NodeManager NodeManager
+
+ // Name is a free form string describing the operation subject to the
+ // backoff, if unspecified, a UUID is generated. This string is used
+ // for logging purposes.
+ Name string
+
+ // ResetAfter will reset the exponential back-off if no attempt is made for the amount of time specified here.
+ // Needs to be larger than the Max duration, otherwise it will be ignored to avoid accidental resets.
+ // If unspecified, no reset is performed.
+ ResetAfter time.Duration
+
+ lastBackoffStart time.Time
+
+ attempt int
+}
+
+// CalculateDuration calculates the backoff duration based on minimum base
+// interval, exponential factor, jitter and number of failures.
+func CalculateDuration(min, max time.Duration, factor float64, jitter bool, failures int) time.Duration {
+ minFloat := float64(min)
+ maxFloat := float64(max)
+
+ t := minFloat * math.Pow(factor, float64(failures))
+ if max != time.Duration(0) && t > maxFloat {
+ t = maxFloat
+ }
+
+ if jitter {
+ t = randGen.Float64()*(t-minFloat) + minFloat
+ }
+
+ return time.Duration(t)
+}
+
+// ClusterSizeDependantInterval returns a time.Duration that is dependent on
+// the cluster size, i.e. the number of nodes that have been discovered. This
+// can be used to control sync intervals of shared or centralized resources to
+// avoid overloading these resources as the cluster grows.
+//
+// Example sync interval with baseInterval = 1 * time.Minute
+//
+// nodes | sync interval
+// ------+-----------------
+// 1 | 41.588830833s
+// 2 | 1m05.916737320s
+// 4 | 1m36.566274746s
+// 8 | 2m11.833474640s
+// 16 | 2m49.992800643s
+// 32 | 3m29.790453687s
+// 64 | 4m10.463236193s
+// 128 | 4m51.588744261s
+// 256 | 5m32.944565093s
+// 512 | 6m14.416550710s
+// 1024 | 6m55.946873494s
+// 2048 | 7m37.506428894s
+// 4096 | 8m19.080616652s
+// 8192 | 9m00.662124608s
+// 16384 | 9m42.247293667s
+func ClusterSizeDependantInterval(baseInterval time.Duration, numNodes int) time.Duration {
+ // no nodes are being managed, no work will be performed, return
+ // baseInterval to check again in a reasonable timeframe
+ if numNodes == 0 {
+ return baseInterval
+ }
+
+ waitNanoseconds := float64(baseInterval.Nanoseconds()) * math.Log1p(float64(numNodes))
+ return time.Duration(int64(waitNanoseconds))
+}
+
+// Reset backoff attempt counter
+func (b *Exponential) Reset() {
+ b.attempt = 0
+}
+
+// Wait waits for the required time using an exponential backoff
+func (b *Exponential) Wait(ctx context.Context) error {
+ if resetDuration := b.ResetAfter; resetDuration != time.Duration(0) && resetDuration > b.Max {
+ if !b.lastBackoffStart.IsZero() {
+ if time.Since(b.lastBackoffStart) > resetDuration {
+ b.Reset()
+ }
+ }
+ }
+
+ b.lastBackoffStart = time.Now()
+ b.attempt++
+ t := b.Duration(b.attempt)
+
+ log.WithFields(logrus.Fields{
+ "time": t,
+ "attempt": b.attempt,
+ "name": b.Name,
+ }).Debug("Sleeping with exponential backoff")
+
+ select {
+ case <-ctx.Done():
+ return fmt.Errorf("exponential backoff cancelled via context: %s", ctx.Err())
+ case <-time.After(t):
+ }
+
+ return nil
+}
+
+// Duration returns the wait duration for the nth attempt
+func (b *Exponential) Duration(attempt int) time.Duration {
+ if b.Name == "" {
+ b.Name = uuid.New().String()
+ }
+
+ min := time.Duration(1) * time.Second
+ if b.Min != time.Duration(0) {
+ min = b.Min
+ }
+
+ factor := float64(2)
+ if b.Factor != float64(0) {
+ factor = b.Factor
+ }
+
+ t := CalculateDuration(min, b.Max, factor, b.Jitter, attempt)
+
+ if b.NodeManager != nil {
+ t = b.NodeManager.ClusterSizeDependantInterval(t)
+ }
+
+ if b.Max != time.Duration(0) && t > b.Max {
+ t = b.Max
+ }
+
+ return t
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/byteorder/byteorder.go b/vendor/github.com/cilium/cilium/pkg/byteorder/byteorder.go
new file mode 100644
index 000000000..caa855d21
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/byteorder/byteorder.go
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package byteorder
+
+import (
+ "net"
+ "net/netip"
+)
+
+// NetIPv4ToHost32 converts a net.IP to a uint32 in host byte order. ip
+// must be an IPv4 address, otherwise the function will panic.
+func NetIPv4ToHost32(ip net.IP) uint32 {
+ ipv4 := ip.To4()
+ _ = ipv4[3] // Assert length of ipv4.
+ return Native.Uint32(ipv4)
+}
+
+func NetIPAddrToHost32(ip netip.Addr) uint32 {
+ ipv4 := ip.As4()
+ return Native.Uint32(ipv4[:])
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/byteorder/byteorder_bigendian.go b/vendor/github.com/cilium/cilium/pkg/byteorder/byteorder_bigendian.go
new file mode 100644
index 000000000..7b0873f82
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/byteorder/byteorder_bigendian.go
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+//go:build armbe || arm64be || mips || mips64 || ppc64
+
+package byteorder
+
+import "encoding/binary"
+
+var Native binary.ByteOrder = binary.BigEndian
+
+func HostToNetwork16(u uint16) uint16 { return u }
+func HostToNetwork32(u uint32) uint32 { return u }
+func HostToNetwork64(u uint64) uint64 { return u }
+func NetworkToHost16(u uint16) uint16 { return u }
+func NetworkToHost32(u uint32) uint32 { return u }
+func NetworkToHost64(u uint64) uint64 { return u }
diff --git a/vendor/github.com/cilium/cilium/pkg/byteorder/byteorder_littleendian.go b/vendor/github.com/cilium/cilium/pkg/byteorder/byteorder_littleendian.go
new file mode 100644
index 000000000..0fffee1b2
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/byteorder/byteorder_littleendian.go
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+//go:build 386 || amd64 || arm || arm64 || mips64le || ppc64le || riscv64 || wasm
+
+package byteorder
+
+import (
+ "encoding/binary"
+ "math/bits"
+)
+
+var Native binary.ByteOrder = binary.LittleEndian
+
+func HostToNetwork16(u uint16) uint16 { return bits.ReverseBytes16(u) }
+func HostToNetwork32(u uint32) uint32 { return bits.ReverseBytes32(u) }
+func HostToNetwork64(u uint64) uint64 { return bits.ReverseBytes64(u) }
+func NetworkToHost16(u uint16) uint16 { return bits.ReverseBytes16(u) }
+func NetworkToHost32(u uint32) uint32 { return bits.ReverseBytes32(u) }
+func NetworkToHost64(u uint64) uint64 { return bits.ReverseBytes64(u) }
diff --git a/vendor/github.com/cilium/cilium/pkg/byteorder/doc.go b/vendor/github.com/cilium/cilium/pkg/byteorder/doc.go
new file mode 100644
index 000000000..1c2497c75
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/byteorder/doc.go
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Package byteorder provides functions to convert from and to network byte order.
+package byteorder
diff --git a/vendor/github.com/cilium/cilium/pkg/cidr/cidr.go b/vendor/github.com/cilium/cilium/pkg/cidr/cidr.go
index a7e974ff6..66c6e5cae 100644
--- a/vendor/github.com/cilium/cilium/pkg/cidr/cidr.go
+++ b/vendor/github.com/cilium/cilium/pkg/cidr/cidr.go
@@ -72,7 +72,6 @@ func (in *CIDR) DeepCopyInto(out *CIDR) {
*out = make(net.IPMask, len(*in))
copy(*out, *in)
}
- return
}
// AvailableIPs returns the number of IPs available in a CIDR
@@ -89,7 +88,7 @@ func (n *CIDR) Equal(o *CIDR) bool {
return Equal(n.IPNet, o.IPNet)
}
-// Equal returns true if the n and o net.IPNet CIDRs arr Equal.
+// Equal returns true if the n and o net.IPNet CIDRs are Equal.
func Equal(n, o *net.IPNet) bool {
if n == nil || o == nil {
return n == o
@@ -101,6 +100,23 @@ func Equal(n, o *net.IPNet) bool {
bytes.Equal(n.Mask, o.Mask)
}
+// ZeroNet generates a zero net.IPNet object for the given address family
+func ZeroNet(family int) *net.IPNet {
+ switch family {
+ case FAMILY_V4:
+ return &net.IPNet{
+ IP: net.IPv4zero,
+ Mask: net.CIDRMask(0, 8*net.IPv4len),
+ }
+ case FAMILY_V6:
+ return &net.IPNet{
+ IP: net.IPv6zero,
+ Mask: net.CIDRMask(0, 8*net.IPv6len),
+ }
+ }
+ return nil
+}
+
// ContainsAll returns true if 'ipNets1' contains all net.IPNet of 'ipNets2'
func ContainsAll(ipNets1, ipNets2 []*net.IPNet) bool {
for _, n := range ipNets2 {
diff --git a/vendor/github.com/cilium/cilium/pkg/cidr/cidr_linux.go b/vendor/github.com/cilium/cilium/pkg/cidr/cidr_linux.go
new file mode 100644
index 000000000..a43d9b46a
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/cidr/cidr_linux.go
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package cidr
+
+import "github.com/vishvananda/netlink/nl"
+
+// Family type definitions
+const (
+ FAMILY_ALL = nl.FAMILY_ALL
+ FAMILY_V4 = nl.FAMILY_V4
+ FAMILY_V6 = nl.FAMILY_V6
+ FAMILY_MPLS = nl.FAMILY_MPLS
+)
diff --git a/vendor/github.com/cilium/cilium/pkg/cidr/cidr_unspecified.go b/vendor/github.com/cilium/cilium/pkg/cidr/cidr_unspecified.go
new file mode 100644
index 000000000..dfe393960
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/cidr/cidr_unspecified.go
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+//go:build !linux
+
+package cidr
+
+// Dummy values on non-linux platform
+const (
+ FAMILY_V4 = iota
+ FAMILY_V6
+)
diff --git a/vendor/github.com/cilium/cilium/pkg/client/client.go b/vendor/github.com/cilium/cilium/pkg/client/client.go
new file mode 100644
index 000000000..a1af5a888
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/client/client.go
@@ -0,0 +1,747 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package client
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "sort"
+ "strings"
+ "text/tabwriter"
+ "time"
+
+ runtime_client "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ clientapi "github.com/cilium/cilium/api/v1/client"
+ "github.com/cilium/cilium/api/v1/models"
+ "github.com/cilium/cilium/pkg/defaults"
+)
+
+type Client struct {
+ clientapi.CiliumAPI
+}
+
+// DefaultSockPath returns default UNIX domain socket path or
+// path set using CILIUM_SOCK env variable
+func DefaultSockPath() string {
+ // Check if environment variable points to socket
+ e := os.Getenv(defaults.SockPathEnv)
+ if e == "" {
+ // If unset, fall back to default value
+ e = defaults.SockPath
+ }
+ return "unix://" + e
+
+}
+
+func configureTransport(tr *http.Transport, proto, addr string) *http.Transport {
+ if tr == nil {
+ tr = &http.Transport{}
+ }
+
+ if proto == "unix" {
+ // No need for compression in local communications.
+ tr.DisableCompression = true
+ tr.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) {
+ return net.Dial(proto, addr)
+ }
+ } else {
+ tr.Proxy = http.ProxyFromEnvironment
+ tr.DialContext = (&net.Dialer{}).DialContext
+ }
+
+ return tr
+}
+
+// NewDefaultClient creates a client with default parameters connecting to UNIX domain socket.
+func NewDefaultClient() (*Client, error) {
+ return NewClient("")
+}
+
+// NewDefaultClientWithTimeout creates a client with default parameters connecting to UNIX
+// domain socket and waits for cilium-agent availability.
+func NewDefaultClientWithTimeout(timeout time.Duration) (*Client, error) {
+ timeoutAfter := time.After(timeout)
+ var c *Client
+ var err error
+ for {
+ select {
+ case <-timeoutAfter:
+ return nil, fmt.Errorf("failed to create cilium agent client after %f seconds timeout: %s", timeout.Seconds(), err)
+ default:
+ }
+
+ c, err = NewDefaultClient()
+ if err != nil {
+ time.Sleep(500 * time.Millisecond)
+ continue
+ }
+
+ for {
+ select {
+ case <-timeoutAfter:
+ return nil, fmt.Errorf("failed to create cilium agent client after %f seconds timeout: %s", timeout.Seconds(), err)
+ default:
+ }
+ // This is an API call that we do to the cilium-agent to check
+ // if it is up and running.
+ _, err = c.Daemon.GetConfig(nil)
+ if err != nil {
+ time.Sleep(500 * time.Millisecond)
+ continue
+ }
+ return c, nil
+ }
+ }
+}
+
+// NewClient creates a client for the given `host`.
+// If host is nil then use SockPath provided by CILIUM_SOCK
+// or the cilium default SockPath
+func NewClient(host string) (*Client, error) {
+ clientTrans, err := NewRuntime(host)
+ return &Client{*clientapi.New(clientTrans, strfmt.Default)}, err
+}
+
+func NewRuntime(host string) (*runtime_client.Runtime, error) {
+ if host == "" {
+ host = DefaultSockPath()
+ }
+ tmp := strings.SplitN(host, "://", 2)
+ if len(tmp) != 2 {
+ return nil, fmt.Errorf("invalid host format '%s'", host)
+ }
+
+ hostHeader := tmp[1]
+
+ switch tmp[0] {
+ case "tcp":
+ if _, err := url.Parse("tcp://" + tmp[1]); err != nil {
+ return nil, err
+ }
+ host = "http://" + tmp[1]
+ case "unix":
+ host = tmp[1]
+ // For local communication (unix domain sockets), the hostname is not used. Leave
+ // Host header empty because otherwise it would be rejected by net/http client-side
+ // sanitization, see https://go.dev/issue/60374.
+ hostHeader = "localhost"
+ }
+
+ transport := configureTransport(nil, tmp[0], host)
+ httpClient := &http.Client{Transport: transport}
+ clientTrans := runtime_client.NewWithClient(hostHeader, clientapi.DefaultBasePath,
+ clientapi.DefaultSchemes, httpClient)
+ return clientTrans, nil
+}
+
+// Hint tries to improve the error message displayed to the user.
+func Hint(err error) error {
+ if err == nil {
+ return err
+ }
+
+ if errors.Is(err, context.DeadlineExceeded) {
+ return fmt.Errorf("Cilium API client timeout exceeded")
+ }
+
+ e, _ := url.PathUnescape(err.Error())
+ if strings.Contains(err.Error(), defaults.SockPath) {
+ return fmt.Errorf("%s\nIs the agent running?", e)
+ }
+ return fmt.Errorf("%s", e)
+}
+
+func timeSince(since time.Time) string {
+ out := "never"
+ if !since.IsZero() {
+ t := time.Since(since)
+ out = t.Truncate(time.Second).String() + " ago"
+ }
+
+ return out
+}
+
+func stateUnhealthy(state string) bool {
+ return state == models.StatusStateWarning ||
+ state == models.StatusStateFailure
+}
+
+func statusUnhealthy(s *models.Status) bool {
+ if s != nil {
+ return stateUnhealthy(s.State)
+ }
+ return false
+}
+
+// FormatStatusResponseBrief writes a one-line status to the writer. If
+// everything ok, this is "ok", otherwise a message of the form "error in ..."
+func FormatStatusResponseBrief(w io.Writer, sr *models.StatusResponse) {
+ msg := ""
+
+ switch {
+ case statusUnhealthy(sr.Kvstore):
+ msg = fmt.Sprintf("kvstore: %s", sr.Kvstore.Msg)
+ case statusUnhealthy(sr.ContainerRuntime):
+ msg = fmt.Sprintf("container runtime: %s", sr.ContainerRuntime.Msg)
+ case sr.Kubernetes != nil && stateUnhealthy(sr.Kubernetes.State):
+ msg = fmt.Sprintf("kubernetes: %s", sr.Kubernetes.Msg)
+ case statusUnhealthy(sr.Cilium):
+ msg = fmt.Sprintf("cilium: %s", sr.Cilium.Msg)
+ case sr.Cluster != nil && statusUnhealthy(sr.Cluster.CiliumHealth):
+ msg = fmt.Sprintf("cilium-health: %s", sr.Cluster.CiliumHealth.Msg)
+ }
+
+ // Only bother looking at controller failures if everything else is ok
+ if msg == "" {
+ for _, ctrl := range sr.Controllers {
+ if ctrl.Status == nil {
+ continue
+ }
+ if ctrl.Status.LastFailureMsg != "" {
+ msg = fmt.Sprintf("controller %s: %s",
+ ctrl.Name, ctrl.Status.LastFailureMsg)
+ break
+ }
+ }
+ }
+
+ if msg == "" {
+ fmt.Fprintf(w, "OK\n")
+ } else {
+ fmt.Fprintf(w, "error in %s\n", msg)
+ }
+}
+
+func clusterReadiness(cluster *models.RemoteCluster) string {
+ if !cluster.Ready {
+ return "not-ready"
+ }
+ return "ready"
+}
+
+func numReadyClusters(clustermesh *models.ClusterMeshStatus) int {
+ numReady := 0
+ for _, cluster := range clustermesh.Clusters {
+ if cluster.Ready {
+ numReady++
+ }
+ }
+ return numReady
+}
+
+type StatusDetails struct {
+ // AllAddresses causes all addresses to be printed by FormatStatusResponse.
+ AllAddresses bool
+ // AllControllers causes all controllers to be printed by FormatStatusResponse.
+ AllControllers bool
+ // AllNodes causes all nodes to be printed by FormatStatusResponse.
+ AllNodes bool
+ // AllRedirects causes all redirects to be printed by FormatStatusResponse.
+ AllRedirects bool
+ // AllClusters causes all clusters to be printed by FormatStatusResponse.
+ AllClusters bool
+ // BPFMapDetails causes BPF map details to be printed by FormatStatusResponse.
+ BPFMapDetails bool
+ // KubeProxyReplacementDetails causes BPF kube-proxy details to be printed by FormatStatusResponse.
+ KubeProxyReplacementDetails bool
+ // ClockSourceDetails causes BPF time-keeping internals to be printed by FormatStatusResponse.
+ ClockSourceDetails bool
+}
+
+var (
+ // StatusNoDetails causes no additional status details to be printed by
+ // FormatStatusResponse.
+ StatusNoDetails = StatusDetails{}
+ // StatusAllDetails causes all status details to be printed by FormatStatusResponse.
+ StatusAllDetails = StatusDetails{
+ AllAddresses: true,
+ AllControllers: true,
+ AllNodes: true,
+ AllRedirects: true,
+ AllClusters: true,
+ BPFMapDetails: true,
+ KubeProxyReplacementDetails: true,
+ ClockSourceDetails: true,
+ }
+)
+
+// FormatStatusResponse writes a StatusResponse as a string to the writer. The bit mask sd controls
+// whether additional details are printed about a certain aspect of the status. In case there are
+// errors, some details may be printed regardless of the value of sd.
+func FormatStatusResponse(w io.Writer, sr *models.StatusResponse, sd StatusDetails) {
+ if sr.Kvstore != nil {
+ fmt.Fprintf(w, "KVStore:\t%s\t%s\n", sr.Kvstore.State, sr.Kvstore.Msg)
+ }
+ if sr.ContainerRuntime != nil {
+ fmt.Fprintf(w, "ContainerRuntime:\t%s\t%s\n",
+ sr.ContainerRuntime.State, sr.ContainerRuntime.Msg)
+ }
+
+ kubeProxyDevices := ""
+ if sr.Kubernetes != nil {
+ fmt.Fprintf(w, "Kubernetes:\t%s\t%s\n", sr.Kubernetes.State, sr.Kubernetes.Msg)
+ if sr.Kubernetes.State != models.K8sStatusStateDisabled {
+ sort.Strings(sr.Kubernetes.K8sAPIVersions)
+ fmt.Fprintf(w, "Kubernetes APIs:\t[\"%s\"]\n", strings.Join(sr.Kubernetes.K8sAPIVersions, "\", \""))
+ }
+
+ }
+ if sr.KubeProxyReplacement != nil {
+ devices := ""
+ if sr.KubeProxyReplacement.Mode != models.KubeProxyReplacementModeDisabled {
+ for i, dev := range sr.KubeProxyReplacement.DeviceList {
+ kubeProxyDevices += fmt.Sprintf("%s %s", dev.Name, strings.Join(dev.IP, " "))
+ if dev.Name == sr.KubeProxyReplacement.DirectRoutingDevice {
+ kubeProxyDevices += " (Direct Routing)"
+ }
+ if i+1 != len(sr.KubeProxyReplacement.Devices) {
+ kubeProxyDevices += ", "
+ }
+ }
+ if len(sr.KubeProxyReplacement.DeviceList) > 0 {
+ devices = "[" + kubeProxyDevices + "]"
+ }
+ }
+ fmt.Fprintf(w, "KubeProxyReplacement:\t%s\t%s\n",
+ sr.KubeProxyReplacement.Mode, devices)
+ }
+ if sr.HostFirewall != nil {
+ fmt.Fprintf(w, "Host firewall:\t%s", sr.HostFirewall.Mode)
+ if sr.HostFirewall.Mode != models.HostFirewallModeDisabled {
+ fmt.Fprintf(w, "\t[%s]", strings.Join(sr.HostFirewall.Devices, ", "))
+ }
+ fmt.Fprintf(w, "\n")
+ }
+
+ if sr.CniChaining != nil {
+ fmt.Fprintf(w, "CNI Chaining:\t%s\n", sr.CniChaining.Mode)
+ }
+
+ if sr.CniFile != nil {
+ fmt.Fprintf(w, "CNI Config file:\t%s\n", sr.CniFile.Msg)
+ }
+
+ if sr.Cilium != nil {
+ fmt.Fprintf(w, "Cilium:\t%s %s\n", sr.Cilium.State, sr.Cilium.Msg)
+ }
+
+ if sr.Stale != nil {
+ sortedProbes := make([]string, 0, len(sr.Stale))
+ for probe := range sr.Stale {
+ sortedProbes = append(sortedProbes, probe)
+ }
+ sort.Strings(sortedProbes)
+
+ stalesStr := make([]string, 0, len(sr.Stale))
+ for _, probe := range sortedProbes {
+ stalesStr = append(stalesStr, fmt.Sprintf("%q since %s", probe, sr.Stale[probe]))
+ }
+
+ fmt.Fprintf(w, "Stale status:\t%s\n", strings.Join(stalesStr, ", "))
+ }
+
+ if nm := sr.NodeMonitor; nm != nil {
+ fmt.Fprintf(w, "NodeMonitor:\tListening for events on %d CPUs with %dx%d of shared memory\n",
+ nm.Cpus, nm.Npages, nm.Pagesize)
+ if nm.Lost != 0 || nm.Unknown != 0 {
+ fmt.Fprintf(w, "\t%d events lost, %d unknown notifications\n", nm.Lost, nm.Unknown)
+ }
+ } else {
+ fmt.Fprintf(w, "NodeMonitor:\tDisabled\n")
+ }
+
+ if sr.Cluster != nil {
+ if sr.Cluster.CiliumHealth != nil {
+ ch := sr.Cluster.CiliumHealth
+ fmt.Fprintf(w, "Cilium health daemon:\t%s\t%s\n", ch.State, ch.Msg)
+ }
+ }
+
+ if sr.Ipam != nil {
+ fmt.Fprintf(w, "IPAM:\t%s\n", sr.Ipam.Status)
+ if sd.AllAddresses {
+ fmt.Fprintf(w, "Allocated addresses:\n")
+ out := make([]string, 0, len(sr.Ipam.Allocations))
+ for ip, owner := range sr.Ipam.Allocations {
+ out = append(out, fmt.Sprintf(" %s (%s)", ip, owner))
+ }
+ sort.Strings(out)
+ for _, line := range out {
+ fmt.Fprintln(w, line)
+ }
+ }
+ }
+
+ if sr.ClusterMesh != nil {
+ fmt.Fprintf(w, "ClusterMesh:\t%d/%d clusters ready, %d global-services\n",
+ numReadyClusters(sr.ClusterMesh), len(sr.ClusterMesh.Clusters), sr.ClusterMesh.NumGlobalServices)
+
+ for _, cluster := range sr.ClusterMesh.Clusters {
+ if sd.AllClusters || !cluster.Ready {
+ fmt.Fprintf(w, " %s: %s, %d nodes, %d endpoints, %d identities, %d services, %d failures (last: %s)\n",
+ cluster.Name, clusterReadiness(cluster), cluster.NumNodes,
+ cluster.NumEndpoints, cluster.NumIdentities, cluster.NumSharedServices,
+ cluster.NumFailures, timeSince(time.Time(cluster.LastFailure)))
+ fmt.Fprintf(w, " └ %s\n", cluster.Status)
+
+ fmt.Fprint(w, " └ remote configuration: ")
+ if cluster.Config != nil {
+ fmt.Fprintf(w, "expected=%t, retrieved=%t", cluster.Config.Required, cluster.Config.Retrieved)
+ if cluster.Config.Retrieved {
+ fmt.Fprintf(w, ", cluster-id=%d, kvstoremesh=%t, sync-canaries=%t",
+ cluster.Config.ClusterID, cluster.Config.Kvstoremesh, cluster.Config.SyncCanaries)
+ }
+ } else {
+ fmt.Fprint(w, "expected=unknown, retrieved=unknown")
+ }
+ fmt.Fprint(w, "\n")
+
+ if cluster.Synced != nil {
+ fmt.Fprintf(w, " └ synchronization status: nodes=%v, endpoints=%v, identities=%v, services=%v\n",
+ cluster.Synced.Nodes, cluster.Synced.Endpoints, cluster.Synced.Identities, cluster.Synced.Services)
+ }
+ }
+ }
+ }
+
+ if sr.IPV4BigTCP != nil {
+ status := "Disabled"
+ if sr.IPV4BigTCP.Enabled {
+ max := fmt.Sprintf("[%d]", sr.IPV4BigTCP.MaxGSO)
+ if sr.IPV4BigTCP.MaxGRO != sr.IPV4BigTCP.MaxGSO {
+ max = fmt.Sprintf("[%d, %d]", sr.IPV4BigTCP.MaxGRO, sr.IPV4BigTCP.MaxGSO)
+ }
+ status = fmt.Sprintf("Enabled\t%s", max)
+ }
+ fmt.Fprintf(w, "IPv4 BIG TCP:\t%s\n", status)
+ }
+
+ if sr.IPV6BigTCP != nil {
+ status := "Disabled"
+ if sr.IPV6BigTCP.Enabled {
+ max := fmt.Sprintf("[%d]", sr.IPV6BigTCP.MaxGSO)
+ if sr.IPV6BigTCP.MaxGRO != sr.IPV6BigTCP.MaxGSO {
+ max = fmt.Sprintf("[%d, %d]", sr.IPV6BigTCP.MaxGRO, sr.IPV6BigTCP.MaxGSO)
+ }
+ status = fmt.Sprintf("Enabled\t%s", max)
+ }
+ fmt.Fprintf(w, "IPv6 BIG TCP:\t%s\n", status)
+ }
+
+ if sr.BandwidthManager != nil {
+ var status string
+ if !sr.BandwidthManager.Enabled {
+ status = "Disabled"
+ } else {
+ status = fmt.Sprintf("EDT with BPF [%s] [%s]",
+ strings.ToUpper(sr.BandwidthManager.CongestionControl),
+ strings.Join(sr.BandwidthManager.Devices, ", "))
+ }
+ fmt.Fprintf(w, "BandwidthManager:\t%s\n", status)
+ }
+
+ if sr.HostRouting != nil {
+ fmt.Fprintf(w, "Host Routing:\t%s\n", sr.HostRouting.Mode)
+ }
+
+ if sr.Masquerading != nil {
+ var status string
+
+ enabled := func(enabled bool) string {
+ if enabled {
+ return "Enabled"
+ }
+ return "Disabled"
+ }
+
+ if sr.Masquerading.EnabledProtocols == nil {
+ status = enabled(sr.Masquerading.Enabled)
+ } else if !sr.Masquerading.EnabledProtocols.IPV4 && !sr.Masquerading.EnabledProtocols.IPV6 {
+ status = enabled(false)
+ } else {
+ if sr.Masquerading.Mode == models.MasqueradingModeBPF {
+ if sr.Masquerading.IPMasqAgent {
+ status = "BPF (ip-masq-agent)"
+ } else {
+ status = "BPF"
+ }
+ if sr.KubeProxyReplacement != nil {
+ // When BPF Masquerading is enabled we don't do any masquerading for IPv6
+ // traffic so no SNAT Exclusion IPv6 CIDR is listed in status output.
+ devStr := ""
+ for i, dev := range sr.KubeProxyReplacement.DeviceList {
+ devStr += dev.Name
+ if i+1 != len(sr.KubeProxyReplacement.DeviceList) {
+ devStr += ", "
+ }
+ }
+ status += fmt.Sprintf("\t[%s]\t%s",
+ devStr,
+ sr.Masquerading.SnatExclusionCidrV4)
+ }
+
+ } else if sr.Masquerading.Mode == models.MasqueradingModeIptables {
+ status = "IPTables"
+ }
+
+ status = fmt.Sprintf("%s [IPv4: %s, IPv6: %s]", status,
+ enabled(sr.Masquerading.EnabledProtocols.IPV4), enabled(sr.Masquerading.EnabledProtocols.IPV6))
+ }
+ fmt.Fprintf(w, "Masquerading:\t%s\n", status)
+ }
+
+ if sd.ClockSourceDetails && sr.ClockSource != nil {
+ status := sr.ClockSource.Mode
+ if sr.ClockSource.Mode == models.ClockSourceModeJiffies {
+ status = fmt.Sprintf("%s\t[%d Hz]",
+ sr.ClockSource.Mode, sr.ClockSource.Hertz)
+ }
+ fmt.Fprintf(w, "Clock Source for BPF:\t%s\n", status)
+ }
+
+ if sr.Controllers != nil {
+ nFailing, out := 0, []string{" Name\tLast success\tLast error\tCount\tMessage\n"}
+ for _, ctrl := range sr.Controllers {
+ status := ctrl.Status
+ if status == nil {
+ continue
+ }
+
+ if status.ConsecutiveFailureCount > 0 {
+ nFailing++
+ } else if !sd.AllControllers {
+ continue
+ }
+
+ failSince := timeSince(time.Time(status.LastFailureTimestamp))
+ successSince := timeSince(time.Time(status.LastSuccessTimestamp))
+
+ err := "no error"
+ if status.LastFailureMsg != "" {
+ err = status.LastFailureMsg
+ }
+
+ out = append(out, fmt.Sprintf(" %s\t%s\t%s\t%d\t%s\t\n",
+ ctrl.Name, successSince, failSince, status.ConsecutiveFailureCount, err))
+ }
+
+ nOK := len(sr.Controllers) - nFailing
+ fmt.Fprintf(w, "Controller Status:\t%d/%d healthy\n", nOK, len(sr.Controllers))
+ if len(out) > 1 {
+ tab := tabwriter.NewWriter(w, 0, 0, 3, ' ', 0)
+ sort.Strings(out)
+ for _, s := range out {
+ fmt.Fprint(tab, s)
+ }
+ tab.Flush()
+ }
+
+ }
+
+ if sr.Proxy != nil {
+ fmt.Fprintf(w, "Proxy Status:\tOK, ip %s, %d redirects active on ports %s, Envoy: %s\n",
+ sr.Proxy.IP, sr.Proxy.TotalRedirects, sr.Proxy.PortRange, sr.Proxy.EnvoyDeploymentMode)
+ if sd.AllRedirects && sr.Proxy.TotalRedirects > 0 {
+ out := make([]string, 0, len(sr.Proxy.Redirects)+1)
+ for _, r := range sr.Proxy.Redirects {
+ out = append(out, fmt.Sprintf(" %s\t%s\t%d\n", r.Proxy, r.Name, r.ProxyPort))
+ }
+ tab := tabwriter.NewWriter(w, 0, 0, 3, ' ', 0)
+ fmt.Fprint(tab, " Protocol\tRedirect\tProxy Port\n")
+ sort.Strings(out)
+ for _, s := range out {
+ fmt.Fprint(tab, s)
+ }
+ tab.Flush()
+ }
+ } else {
+ fmt.Fprintf(w, "Proxy Status:\tNo managed proxy redirect\n")
+ }
+
+ if sr.IdentityRange != nil {
+ fmt.Fprintf(w, "Global Identity Range:\tmin %d, max %d\n",
+ sr.IdentityRange.MinIdentity, sr.IdentityRange.MaxIdentity)
+ } else {
+ fmt.Fprintf(w, "Global Identity Range:\tUnknown\n")
+ }
+
+ if sr.Hubble != nil {
+ var fields []string
+
+ state := sr.Hubble.State
+ if sr.Hubble.Msg != "" {
+ state = fmt.Sprintf("%s %s", state, sr.Hubble.Msg)
+ }
+ fields = append(fields, state)
+
+ if o := sr.Hubble.Observer; o != nil {
+ var observer []string
+
+ if o.MaxFlows > 0 {
+ observer = append(observer, fmt.Sprintf("Current/Max Flows: %d/%d (%.2f%%)",
+ o.CurrentFlows, o.MaxFlows, (float64(o.CurrentFlows)/float64(o.MaxFlows))*100))
+ }
+ if o.Uptime > 0 {
+ observer = append(observer, fmt.Sprintf("Flows/s: %.2f",
+ float64(o.SeenFlows)/time.Duration(o.Uptime).Seconds()))
+ }
+
+ fields = append(fields, strings.Join(observer, ", "))
+ }
+
+ if sr.Hubble.Metrics != nil {
+ fields = append(fields, fmt.Sprintf("Metrics: %s", sr.Hubble.Metrics.State))
+ }
+
+ fmt.Fprintf(w, "Hubble:\t%s\n", strings.Join(fields, "\t"))
+ }
+
+ if sd.KubeProxyReplacementDetails && sr.Kubernetes != nil && sr.KubeProxyReplacement != nil {
+ var selection, mode, xdp string
+
+ lb := "Disabled"
+ cIP := "Enabled"
+ nPort := "Disabled"
+ if np := sr.KubeProxyReplacement.Features.NodePort; np.Enabled {
+ selection = np.Algorithm
+ if selection == models.KubeProxyReplacementFeaturesNodePortAlgorithmMaglev {
+ selection = fmt.Sprintf("%s (Table Size: %d)", np.Algorithm, np.LutSize)
+ }
+ xdp = np.Acceleration
+ mode = np.Mode
+ nPort = fmt.Sprintf("Enabled (Range: %d-%d)", np.PortMin, np.PortMax)
+ lb = "Enabled"
+ }
+
+ affinity := "Disabled"
+ if sr.KubeProxyReplacement.Features.SessionAffinity.Enabled {
+ affinity = "Enabled"
+ }
+
+ hPort := "Disabled"
+ if sr.KubeProxyReplacement.Features.HostPort.Enabled {
+ hPort = "Enabled"
+ }
+
+ eIP := "Disabled"
+ if sr.KubeProxyReplacement.Features.ExternalIPs.Enabled {
+ eIP = "Enabled"
+ }
+
+ socketLB := "Disabled"
+ if slb := sr.KubeProxyReplacement.Features.SocketLB; slb.Enabled {
+ socketLB = "Enabled"
+ }
+
+ socketLBTracing := "Disabled"
+ if st := sr.KubeProxyReplacement.Features.SocketLBTracing; st.Enabled {
+ socketLBTracing = "Enabled"
+ }
+
+ socketLBCoverage := "Full"
+ if sr.KubeProxyReplacement.Features.BpfSocketLBHostnsOnly {
+ socketLBCoverage = "Hostns-only"
+ }
+
+ gracefulTerm := "Disabled"
+ if sr.KubeProxyReplacement.Features.GracefulTermination.Enabled {
+ gracefulTerm = "Enabled"
+ }
+
+ nat46X64 := "Disabled"
+ nat46X64GW := "Disabled"
+ nat46X64SVC := "Disabled"
+ prefixes := ""
+ if sr.KubeProxyReplacement.Features.Nat46X64.Enabled {
+ nat46X64 = "Enabled"
+ if svc := sr.KubeProxyReplacement.Features.Nat46X64.Service; svc.Enabled {
+ nat46X64SVC = "Enabled"
+ }
+ if gw := sr.KubeProxyReplacement.Features.Nat46X64.Gateway; gw.Enabled {
+ nat46X64GW = "Enabled"
+ prefixes = strings.Join(gw.Prefixes, ", ")
+ }
+ }
+
+ fmt.Fprintf(w, "KubeProxyReplacement Details:\n")
+ tab := tabwriter.NewWriter(w, 0, 0, 3, ' ', 0)
+ fmt.Fprintf(tab, " Status:\t%s\n", sr.KubeProxyReplacement.Mode)
+ fmt.Fprintf(tab, " Socket LB:\t%s\n", socketLB)
+ fmt.Fprintf(tab, " Socket LB Tracing:\t%s\n", socketLBTracing)
+ fmt.Fprintf(tab, " Socket LB Coverage:\t%s\n", socketLBCoverage)
+ if kubeProxyDevices != "" {
+ fmt.Fprintf(tab, " Devices:\t%s\n", kubeProxyDevices)
+ }
+ if mode != "" {
+ fmt.Fprintf(tab, " Mode:\t%s\n", mode)
+ }
+ if selection != "" {
+ fmt.Fprintf(tab, " Backend Selection:\t%s\n", selection)
+ }
+ fmt.Fprintf(tab, " Session Affinity:\t%s\n", affinity)
+ fmt.Fprintf(tab, " Graceful Termination:\t%s\n", gracefulTerm)
+ if nat46X64 == "Disabled" {
+ fmt.Fprintf(tab, " NAT46/64 Support:\t%s\n", nat46X64)
+ } else {
+ fmt.Fprintf(tab, " NAT46/64 Support:\n")
+ fmt.Fprintf(tab, " - Services:\t%s\n", nat46X64SVC)
+ fmt.Fprintf(tab, " - Gateway:\t%s\n", nat46X64GW)
+ if nat46X64GW == "Enabled" && prefixes != "" {
+ fmt.Fprintf(tab, " Prefixes:\t%s\n", prefixes)
+ }
+ }
+ if xdp != "" {
+ fmt.Fprintf(tab, " XDP Acceleration:\t%s\n", xdp)
+ }
+ fmt.Fprintf(tab, " Services:\n")
+ fmt.Fprintf(tab, " - ClusterIP:\t%s\n", cIP)
+ fmt.Fprintf(tab, " - NodePort:\t%s \n", nPort)
+ fmt.Fprintf(tab, " - LoadBalancer:\t%s \n", lb)
+ fmt.Fprintf(tab, " - externalIPs:\t%s \n", eIP)
+ fmt.Fprintf(tab, " - HostPort:\t%s\n", hPort)
+ tab.Flush()
+ }
+
+ if sd.BPFMapDetails && sr.BpfMaps != nil {
+ dynamicSizingStatus := "off"
+ ratio := sr.BpfMaps.DynamicSizeRatio
+ if 0.0 < ratio && ratio <= 1.0 {
+ dynamicSizingStatus = fmt.Sprintf("on (ratio: %f)", ratio)
+ }
+ fmt.Fprintf(w, "BPF Maps:\tdynamic sizing: %s\n", dynamicSizingStatus)
+ tab := tabwriter.NewWriter(w, 0, 0, 3, ' ', 0)
+ fmt.Fprintf(tab, " Name\tSize\n")
+ for _, m := range sr.BpfMaps.Maps {
+ fmt.Fprintf(tab, " %s\t%d\n", m.Name, m.Size)
+ }
+ tab.Flush()
+ }
+
+ if sr.Encryption != nil {
+ var fields []string
+
+ if sr.Encryption.Msg != "" {
+ fields = append(fields, sr.Encryption.Msg)
+ } else if wg := sr.Encryption.Wireguard; wg != nil {
+ fields = append(fields, fmt.Sprintf("[NodeEncryption: %s", wg.NodeEncryption))
+ ifaces := make([]string, 0, len(wg.Interfaces))
+ for _, i := range wg.Interfaces {
+ iface := fmt.Sprintf("%s (Pubkey: %s, Port: %d, Peers: %d)",
+ i.Name, i.PublicKey, i.ListenPort, i.PeerCount)
+ ifaces = append(ifaces, iface)
+ }
+ fields = append(fields, fmt.Sprintf("%s]", strings.Join(ifaces, ", ")))
+ }
+
+ fmt.Fprintf(w, "Encryption:\t%s\t%s\n", sr.Encryption.Mode, strings.Join(fields, ", "))
+ }
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/client/config.go b/vendor/github.com/cilium/cilium/pkg/client/config.go
new file mode 100644
index 000000000..3775abe2e
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/client/config.go
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package client
+
+import (
+ "github.com/cilium/cilium/api/v1/client/daemon"
+ "github.com/cilium/cilium/api/v1/models"
+ "github.com/cilium/cilium/pkg/api"
+)
+
+// ConfigGet returns a daemon configuration.
+func (c *Client) ConfigGet() (*models.DaemonConfiguration, error) {
+ resp, err := c.Daemon.GetConfig(nil)
+ if err != nil {
+ return nil, Hint(err)
+ }
+ return resp.Payload, nil
+}
+
+// ConfigPatch modifies the daemon configuration.
+func (c *Client) ConfigPatch(cfg models.DaemonConfigurationSpec) error {
+ fullCfg, err := c.ConfigGet()
+ if err != nil {
+ return err
+ }
+
+ for opt, value := range cfg.Options {
+ fullCfg.Spec.Options[opt] = value
+ }
+ if cfg.PolicyEnforcement != "" {
+ fullCfg.Spec.PolicyEnforcement = cfg.PolicyEnforcement
+ }
+
+ params := daemon.NewPatchConfigParams().WithConfiguration(fullCfg.Spec).WithTimeout(api.ClientTimeout)
+ _, err = c.Daemon.PatchConfig(params)
+ return Hint(err)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/client/endpoint.go b/vendor/github.com/cilium/cilium/pkg/client/endpoint.go
new file mode 100644
index 000000000..d412cf91c
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/client/endpoint.go
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package client
+
+import (
+ "github.com/cilium/cilium/api/v1/client/endpoint"
+ "github.com/cilium/cilium/api/v1/models"
+ "github.com/cilium/cilium/pkg/api"
+ pkgEndpointID "github.com/cilium/cilium/pkg/endpoint/id"
+ "github.com/cilium/cilium/pkg/labels"
+)
+
+// EndpointList returns a list of all endpoints
+func (c *Client) EndpointList() ([]*models.Endpoint, error) {
+ resp, err := c.Endpoint.GetEndpoint(nil)
+ if err != nil {
+ return nil, Hint(err)
+ }
+ return resp.Payload, nil
+}
+
+// EndpointDeleteMany deletes multiple endpoints
+func (c *Client) EndpointDeleteMany(req *models.EndpointBatchDeleteRequest) error {
+ params := endpoint.NewDeleteEndpointParams().WithEndpoint(req).WithTimeout(api.ClientTimeout)
+ _, _, err := c.Endpoint.DeleteEndpoint(params)
+ return Hint(err)
+}
+
+// EndpointGet returns endpoint by ID
+func (c *Client) EndpointGet(id string) (*models.Endpoint, error) {
+ params := endpoint.NewGetEndpointIDParams().WithID(id).WithTimeout(api.ClientTimeout)
+ resp, err := c.Endpoint.GetEndpointID(params)
+ if err != nil {
+ /* Since plugins rely on checking the error type, we don't wrap this
+ * with Hint(...)
+ */
+ return nil, err
+ }
+ return resp.Payload, nil
+}
+
+// EndpointCreate creates a new endpoint
+func (c *Client) EndpointCreate(ep *models.EndpointChangeRequest) error {
+ id := pkgEndpointID.NewCiliumID(ep.ID)
+ params := endpoint.NewPutEndpointIDParams().WithID(id).WithEndpoint(ep).WithTimeout(api.ClientTimeout)
+ _, err := c.Endpoint.PutEndpointID(params)
+ return Hint(err)
+}
+
+// EndpointPatch modifies the endpoint
+func (c *Client) EndpointPatch(id string, ep *models.EndpointChangeRequest) error {
+ params := endpoint.NewPatchEndpointIDParams().WithID(id).WithEndpoint(ep).WithTimeout(api.ClientTimeout)
+ _, err := c.Endpoint.PatchEndpointID(params)
+ return Hint(err)
+}
+
+// EndpointDelete deletes endpoint
+func (c *Client) EndpointDelete(id string) error {
+ params := endpoint.NewDeleteEndpointIDParams().WithID(id).WithTimeout(api.ClientTimeout)
+ _, _, err := c.Endpoint.DeleteEndpointID(params)
+ return Hint(err)
+}
+
+// EndpointLogGet returns endpoint log
+func (c *Client) EndpointLogGet(id string) (models.EndpointStatusLog, error) {
+ params := endpoint.NewGetEndpointIDLogParams().WithID(id).WithTimeout(api.ClientTimeout)
+ resp, err := c.Endpoint.GetEndpointIDLog(params)
+ if err != nil {
+ return nil, Hint(err)
+ }
+ return resp.Payload, nil
+}
+
+// EndpointHealthGet returns endpoint healthz
+func (c *Client) EndpointHealthGet(id string) (*models.EndpointHealth, error) {
+ params := endpoint.NewGetEndpointIDHealthzParams().WithID(id).WithTimeout(api.ClientTimeout)
+ resp, err := c.Endpoint.GetEndpointIDHealthz(params)
+ if err != nil {
+ return nil, Hint(err)
+ }
+ return resp.Payload, nil
+}
+
+// EndpointConfigGet returns endpoint configuration
+func (c *Client) EndpointConfigGet(id string) (*models.EndpointConfigurationStatus, error) {
+ params := endpoint.NewGetEndpointIDConfigParams().WithID(id).WithTimeout(api.ClientTimeout)
+ resp, err := c.Endpoint.GetEndpointIDConfig(params)
+ if err != nil {
+ return nil, Hint(err)
+ }
+ return resp.Payload, nil
+}
+
+// EndpointConfigPatch modifies endpoint configuration
+func (c *Client) EndpointConfigPatch(id string, cfg *models.EndpointConfigurationSpec) error {
+ params := endpoint.NewPatchEndpointIDConfigParams().WithID(id).WithTimeout(api.ClientTimeout)
+ if cfg != nil {
+ params.SetEndpointConfiguration(cfg)
+ }
+
+ _, err := c.Endpoint.PatchEndpointIDConfig(params)
+ return Hint(err)
+}
+
+// EndpointLabelsGet returns endpoint label configuration
+func (c *Client) EndpointLabelsGet(id string) (*models.LabelConfiguration, error) {
+ params := endpoint.NewGetEndpointIDLabelsParams().WithID(id).WithTimeout(api.ClientTimeout)
+ resp, err := c.Endpoint.GetEndpointIDLabels(params)
+ if err != nil {
+ return nil, Hint(err)
+ }
+ return resp.Payload, nil
+}
+
+// EndpointLabelsPut modifies endpoint label configuration
+// add: List of labels to add and enable. If the label is an orchestration
+// system label which has been disabled before, it will be removed from
+// the disabled list and readded to the orchestration list. Otherwise
+// it will be added to the custom label list.
+//
+// delete: List of labels to delete. If the label is an orchestration system
+// label, then it will be deleted from the orchestration list and
+// added to the disabled list. Otherwise it will be removed from the
+// custom list.
+func (c *Client) EndpointLabelsPatch(id string, toAdd, toDelete models.Labels) error {
+ currentCfg, err := c.EndpointLabelsGet(id)
+ if err != nil {
+ return err
+ }
+
+ userLbl := labels.NewLabelsFromModel(currentCfg.Status.Realized.User)
+ for _, lbl := range toAdd {
+ lblParsed := labels.ParseLabel(lbl)
+ if _, found := userLbl[lblParsed.Key]; !found {
+ userLbl[lblParsed.Key] = lblParsed
+ }
+ }
+ for _, lbl := range toDelete {
+ lblParsed := labels.ParseLabel(lbl)
+ delete(userLbl, lblParsed.Key)
+ }
+ currentCfg.Spec.User = userLbl.GetModel()
+
+ params := endpoint.NewPatchEndpointIDLabelsParams().WithID(id).WithTimeout(api.ClientTimeout)
+ _, err = c.Endpoint.PatchEndpointIDLabels(params.WithConfiguration(currentCfg.Spec))
+ return Hint(err)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/client/identity.go b/vendor/github.com/cilium/cilium/pkg/client/identity.go
new file mode 100644
index 000000000..483509ea7
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/client/identity.go
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package client
+
+import (
+ "github.com/cilium/cilium/api/v1/client/policy"
+ "github.com/cilium/cilium/api/v1/models"
+ "github.com/cilium/cilium/pkg/api"
+)
+
+// IdentityGet returns a security identity.
+func (c *Client) IdentityGet(id string) (*models.Identity, error) {
+ params := policy.NewGetIdentityIDParams().WithID(id).WithTimeout(api.ClientTimeout)
+
+ resp, err := c.Policy.GetIdentityID(params)
+ if err != nil {
+ return nil, Hint(err)
+ }
+ return resp.Payload, nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/client/ipam.go b/vendor/github.com/cilium/cilium/pkg/client/ipam.go
new file mode 100644
index 000000000..e50bbfb51
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/client/ipam.go
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package client
+
+import (
+ "github.com/cilium/cilium/api/v1/client/ipam"
+ "github.com/cilium/cilium/api/v1/models"
+ "github.com/cilium/cilium/pkg/api"
+)
+
+const (
+ AddressFamilyIPv6 = "ipv6"
+ AddressFamilyIPv4 = "ipv4"
+)
+
+// IPAMAllocate allocates an IP address out of address family specific pool.
+func (c *Client) IPAMAllocate(family, owner, pool string, expiration bool) (*models.IPAMResponse, error) {
+ params := ipam.NewPostIpamParams().WithTimeout(api.ClientTimeout)
+
+ if family != "" {
+ params.SetFamily(&family)
+ }
+ if owner != "" {
+ params.SetOwner(&owner)
+ }
+ if pool != "" {
+ params.SetPool(&pool)
+ }
+ params.SetExpiration(&expiration)
+
+ resp, err := c.Ipam.PostIpam(params)
+ if err != nil {
+ return nil, Hint(err)
+ }
+ return resp.Payload, nil
+}
+
+// IPAMAllocateIP tries to allocate a particular IP address.
+func (c *Client) IPAMAllocateIP(ip, owner, pool string) error {
+ params := ipam.NewPostIpamIPParams().WithIP(ip).WithOwner(&owner).WithTimeout(api.ClientTimeout)
+ if pool != "" {
+ params.SetPool(&pool)
+ }
+ _, err := c.Ipam.PostIpamIP(params)
+ return Hint(err)
+}
+
+// IPAMReleaseIP releases a IP address back to the pool.
+func (c *Client) IPAMReleaseIP(ip, pool string) error {
+ params := ipam.NewDeleteIpamIPParams().WithIP(ip).WithTimeout(api.ClientTimeout)
+ if pool != "" {
+ params.SetPool(&pool)
+ }
+ _, err := c.Ipam.DeleteIpamIP(params)
+ return Hint(err)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/client/lrp.go b/vendor/github.com/cilium/cilium/pkg/client/lrp.go
new file mode 100644
index 000000000..c1108b497
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/client/lrp.go
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package client
+
+import (
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// GetLRPs returns a list of all local redirect policies.
+func (c *Client) GetLRPs() ([]*models.LRPSpec, error) {
+ resp, err := c.Service.GetLrp(nil)
+ if err != nil {
+ return nil, Hint(err)
+ }
+ return resp.Payload, nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/client/policy.go b/vendor/github.com/cilium/cilium/pkg/client/policy.go
new file mode 100644
index 000000000..f90cd86a7
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/client/policy.go
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package client
+
+import (
+ "github.com/cilium/cilium/api/v1/client/policy"
+ "github.com/cilium/cilium/api/v1/models"
+ "github.com/cilium/cilium/pkg/api"
+)
+
+// PolicyPut inserts the `policyJSON`
+func (c *Client) PolicyPut(policyJSON string) (*models.Policy, error) {
+ params := policy.NewPutPolicyParams().WithPolicy(policyJSON).WithTimeout(api.ClientTimeout)
+ resp, err := c.Policy.PutPolicy(params)
+ if err != nil {
+ return nil, Hint(err)
+ }
+ return resp.Payload, nil
+}
+
+// PolicyReplace replaces the `policyJSON`
+func (c *Client) PolicyReplace(policyJSON string, replace bool, replaceWithLabels []string) (*models.Policy, error) {
+ params := policy.NewPutPolicyParams().WithPolicy(policyJSON).WithReplace(&replace).WithReplaceWithLabels(replaceWithLabels).WithTimeout(api.ClientTimeout)
+ resp, err := c.Policy.PutPolicy(params)
+ if err != nil {
+ return nil, Hint(err)
+ }
+ return resp.Payload, nil
+}
+
+// PolicyGet returns policy rules
+func (c *Client) PolicyGet(labels []string) (*models.Policy, error) {
+ params := policy.NewGetPolicyParams().WithLabels(labels).WithTimeout(api.ClientTimeout)
+ resp, err := c.Policy.GetPolicy(params)
+ if err != nil {
+ return nil, Hint(err)
+ }
+ return resp.Payload, nil
+}
+
+// PolicyCacheGet returns the contents of a SelectorCache.
+func (c *Client) PolicyCacheGet() (models.SelectorCache, error) {
+ params := policy.NewGetPolicySelectorsParams().WithTimeout(api.ClientTimeout)
+ resp, err := c.Policy.GetPolicySelectors(params)
+ if err != nil {
+ return nil, Hint(err)
+ }
+ return resp.Payload, nil
+}
+
+// PolicyDelete deletes policy rules
+func (c *Client) PolicyDelete(labels []string) (*models.Policy, error) {
+ params := policy.NewDeletePolicyParams().WithLabels(labels).WithTimeout(api.ClientTimeout)
+ resp, err := c.Policy.DeletePolicy(params)
+ if err != nil {
+ return nil, Hint(err)
+ }
+ return resp.Payload, nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/client/prefilter.go b/vendor/github.com/cilium/cilium/pkg/client/prefilter.go
new file mode 100644
index 000000000..79de98e1b
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/client/prefilter.go
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package client
+
+import (
+ "github.com/cilium/cilium/api/v1/client/prefilter"
+ "github.com/cilium/cilium/api/v1/models"
+ "github.com/cilium/cilium/pkg/api"
+)
+
+// GetPrefilter returns a list of all CIDR prefixes
+func (c *Client) GetPrefilter() (*models.Prefilter, error) {
+ resp, err := c.Prefilter.GetPrefilter(nil)
+ if err != nil {
+ return nil, Hint(err)
+ }
+ return resp.Payload, nil
+}
+
+// PatchPrefilter sets a list of CIDR prefixes
+func (c *Client) PatchPrefilter(spec *models.PrefilterSpec) (*models.Prefilter, error) {
+ params := prefilter.NewPatchPrefilterParams().WithPrefilterSpec(spec).WithTimeout(api.ClientTimeout)
+ resp, err := c.Prefilter.PatchPrefilter(params)
+ if err != nil {
+ return nil, Hint(err)
+ }
+ return resp.Payload, nil
+}
+
+// DeletePrefilter deletes a list of CIDR prefixes
+func (c *Client) DeletePrefilter(spec *models.PrefilterSpec) (*models.Prefilter, error) {
+ params := prefilter.NewDeletePrefilterParams().WithPrefilterSpec(spec).WithTimeout(api.ClientTimeout)
+ resp, err := c.Prefilter.DeletePrefilter(params)
+ if err != nil {
+ return nil, Hint(err)
+ }
+ return resp.Payload, nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/client/recorder.go b/vendor/github.com/cilium/cilium/pkg/client/recorder.go
new file mode 100644
index 000000000..4da24a72b
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/client/recorder.go
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package client
+
+import (
+ "github.com/cilium/cilium/api/v1/client/recorder"
+ "github.com/cilium/cilium/api/v1/models"
+ "github.com/cilium/cilium/pkg/api"
+)
+
+func (c *Client) GetRecorder() ([]*models.Recorder, error) {
+ resp, err := c.Recorder.GetRecorder(nil)
+ if err != nil {
+ return nil, Hint(err)
+ }
+ return resp.Payload, nil
+}
+
+func (c *Client) GetRecorderMasks() ([]*models.RecorderMask, error) {
+ resp, err := c.Recorder.GetRecorderMasks(nil)
+ if err != nil {
+ return nil, Hint(err)
+ }
+ return resp.Payload, nil
+}
+
+func (c *Client) GetRecorderID(id int64) (*models.Recorder, error) {
+ params := recorder.NewGetRecorderIDParams().WithID(id).WithTimeout(api.ClientTimeout)
+ resp, err := c.Recorder.GetRecorderID(params)
+ if err != nil {
+ return nil, Hint(err)
+ }
+ return resp.Payload, nil
+}
+
+func (c *Client) PutRecorderID(id int64, rec *models.RecorderSpec) (bool, error) {
+ params := recorder.NewPutRecorderIDParams().WithID(id).WithConfig(rec).WithTimeout(api.ClientTimeout)
+ _, created, err := c.Recorder.PutRecorderID(params)
+ return created != nil, Hint(err)
+}
+
+func (c *Client) DeleteRecorderID(id int64) error {
+ params := recorder.NewDeleteRecorderIDParams().WithID(id).WithTimeout(api.ClientTimeout)
+ _, err := c.Recorder.DeleteRecorderID(params)
+ return Hint(err)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/client/service.go b/vendor/github.com/cilium/cilium/pkg/client/service.go
new file mode 100644
index 000000000..fa53468c7
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/client/service.go
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package client
+
+import (
+ "github.com/cilium/cilium/api/v1/client/service"
+ "github.com/cilium/cilium/api/v1/models"
+ "github.com/cilium/cilium/pkg/api"
+)
+
+// GetServices returns a list of all services.
+func (c *Client) GetServices() ([]*models.Service, error) {
+ resp, err := c.Service.GetService(nil)
+ if err != nil {
+ return nil, Hint(err)
+ }
+ return resp.Payload, nil
+}
+
+// GetServiceID returns a service by ID.
+func (c *Client) GetServiceID(id int64) (*models.Service, error) {
+ params := service.NewGetServiceIDParams().WithID(id).WithTimeout(api.ClientTimeout)
+ resp, err := c.Service.GetServiceID(params)
+ if err != nil {
+ return nil, Hint(err)
+ }
+ return resp.Payload, nil
+}
+
+// PutServiceID creates or updates a service. Returns true if service was created.
+func (c *Client) PutServiceID(id int64, svc *models.ServiceSpec) (bool, error) {
+ svc.ID = id
+ params := service.NewPutServiceIDParams().WithID(id).WithConfig(svc).WithTimeout(api.ClientTimeout)
+ _, created, err := c.Service.PutServiceID(params)
+ return created != nil, Hint(err)
+}
+
+// DeleteServiceID deletes a service by ID.
+func (c *Client) DeleteServiceID(id int64) error {
+ params := service.NewDeleteServiceIDParams().WithID(id).WithTimeout(api.ClientTimeout)
+ _, err := c.Service.DeleteServiceID(params)
+ return Hint(err)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/clustermesh/types/addressing.go b/vendor/github.com/cilium/cilium/pkg/clustermesh/types/addressing.go
index 176bee29b..8e30e81c8 100644
--- a/vendor/github.com/cilium/cilium/pkg/clustermesh/types/addressing.go
+++ b/vendor/github.com/cilium/cilium/pkg/clustermesh/types/addressing.go
@@ -221,7 +221,7 @@ func (ac AddrCluster) AsNetIP() net.IP {
}
func (ac AddrCluster) AsPrefixCluster() PrefixCluster {
- return PrefixClusterFrom(ac.addr, ac.addr.BitLen(), ac.clusterID)
+ return PrefixClusterFrom(ac.addr, ac.addr.BitLen(), WithClusterID(ac.clusterID))
}
// PrefixCluster is a type that holds a pair of prefix and ClusterID.
@@ -292,14 +292,21 @@ func (pc PrefixCluster) IsSingleIP() bool {
return pc.prefix.IsSingleIP()
}
-func PrefixClusterFrom(addr netip.Addr, bits int, clusterID uint32) PrefixCluster {
- return PrefixCluster{
- prefix: netip.PrefixFrom(addr, bits),
- clusterID: clusterID,
+type PrefixClusterOpts func(*PrefixCluster)
+
+func WithClusterID(id uint32) PrefixClusterOpts {
+ return func(pc *PrefixCluster) { pc.clusterID = id }
+}
+
+func PrefixClusterFrom(addr netip.Addr, bits int, opts ...PrefixClusterOpts) PrefixCluster {
+ pc := PrefixCluster{prefix: netip.PrefixFrom(addr, bits)}
+ for _, opt := range opts {
+ opt(&pc)
}
+ return pc
}
-func PrefixClusterFromCIDR(c *cidr.CIDR, clusterID uint32) PrefixCluster {
+func PrefixClusterFromCIDR(c *cidr.CIDR, opts ...PrefixClusterOpts) PrefixCluster {
if c == nil {
return PrefixCluster{}
}
@@ -310,10 +317,7 @@ func PrefixClusterFromCIDR(c *cidr.CIDR, clusterID uint32) PrefixCluster {
}
ones, _ := c.Mask.Size()
- return PrefixCluster{
- prefix: netip.PrefixFrom(addr, ones),
- clusterID: clusterID,
- }
+ return PrefixClusterFrom(addr, ones, opts...)
}
func (pc0 PrefixCluster) Equal(pc1 PrefixCluster) bool {
diff --git a/vendor/github.com/cilium/cilium/pkg/clustermesh/types/option.go b/vendor/github.com/cilium/cilium/pkg/clustermesh/types/option.go
new file mode 100644
index 000000000..2bc8c9af2
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/clustermesh/types/option.go
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package types
+
+import (
+ "fmt"
+
+ "github.com/spf13/pflag"
+
+ "github.com/cilium/cilium/pkg/defaults"
+)
+
+const (
+ // OptClusterName is the name of the OptClusterName option
+ OptClusterName = "cluster-name"
+
+ // OptClusterID is the name of the OptClusterID option
+ OptClusterID = "cluster-id"
+)
+
+// ClusterInfo groups together the ClusterID and the ClusterName
+type ClusterInfo struct {
+ ID uint32 `mapstructure:"cluster-id"`
+ Name string `mapstructure:"cluster-name"`
+}
+
+// DefaultClusterInfo represents the default ClusterInfo values.
+var DefaultClusterInfo = ClusterInfo{
+ ID: 0,
+ Name: defaults.ClusterName,
+}
+
+// Flags implements the cell.Flagger interface, to register the given flags.
+func (def ClusterInfo) Flags(flags *pflag.FlagSet) {
+ flags.Uint32(OptClusterID, def.ID, "Unique identifier of the cluster")
+ flags.String(OptClusterName, def.Name, "Name of the cluster")
+}
+
+// Validate validates that the ClusterID is in the valid range (including ClusterID == 0),
+// and that the ClusterName is different from the default value if the ClusterID != 0.
+func (c ClusterInfo) Validate() error {
+ if c.ID < ClusterIDMin || c.ID > ClusterIDMax {
+ return fmt.Errorf("invalid cluster id %d: must be in range %d..%d",
+ c.ID, ClusterIDMin, ClusterIDMax)
+ }
+
+ return c.validateName()
+}
+
+// ValidateStrict validates that the ClusterID is in the valid range, but not 0,
+// and that the ClusterName is different from the default value.
+func (c ClusterInfo) ValidateStrict() error {
+ if err := ValidateClusterID(c.ID); err != nil {
+ return err
+ }
+
+ return c.validateName()
+}
+
+func (c ClusterInfo) validateName() error {
+ if c.ID != 0 && c.Name == defaults.ClusterName {
+ return fmt.Errorf("cannot use default cluster name (%s) with option %s",
+ defaults.ClusterName, OptClusterID)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/clustermesh/types/types.go b/vendor/github.com/cilium/cilium/pkg/clustermesh/types/types.go
index e92f269fd..783ec7780 100644
--- a/vendor/github.com/cilium/cilium/pkg/clustermesh/types/types.go
+++ b/vendor/github.com/cilium/cilium/pkg/clustermesh/types/types.go
@@ -4,6 +4,7 @@
package types
import (
+ "errors"
"fmt"
)
@@ -43,16 +44,24 @@ type CiliumClusterConfigCapabilities struct {
Cached bool `json:"cached,omitempty"`
}
-func (c *CiliumClusterConfig) Validate() error {
+// ValidationMode defines if a missing CiliumClusterConfig should be allowed for
+// backward compatibility, or it should be flagged as an error.
+type ValidationMode bool
+
+const (
+ BackwardCompatible ValidationMode = false
+ Strict ValidationMode = true
+)
+
+// Validate validates the configuration correctness. When the validation mode
+// is BackwardCompatible, a missing configuration or with ID=0 is allowed for
+// backward compatibility, otherwise it is flagged as an error.
+func (c *CiliumClusterConfig) Validate(mode ValidationMode) error {
if c == nil || c.ID == 0 {
- // When remote cluster doesn't have cluster config, we
- // currently just bypass the validation for compatibility.
- // Otherwise, we cannot connect with older cluster which
- // doesn't support cluster config feature.
- //
- // When we introduce a new cluster config can't be ignored,
- // we should properly check it here and return error. Now
- // we only have ClusterID which used to be ignored.
+ if mode == Strict {
+ return errors.New("remote cluster is missing cluster configuration")
+ }
+
return nil
}
@@ -62,9 +71,3 @@ func (c *CiliumClusterConfig) Validate() error {
return nil
}
-
-// ClusterIDName groups together the ClusterID and the ClusterName
-type ClusterIDName struct {
- ClusterID uint32
- ClusterName string
-}
diff --git a/vendor/github.com/cilium/cilium/pkg/command/exec/doc.go b/vendor/github.com/cilium/cilium/pkg/command/exec/doc.go
new file mode 100644
index 000000000..959e90373
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/command/exec/doc.go
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Package exec provides useful wrappers around the standard "exec" library.
+package exec
diff --git a/vendor/github.com/cilium/cilium/pkg/command/exec/exec.go b/vendor/github.com/cilium/cilium/pkg/command/exec/exec.go
new file mode 100644
index 000000000..f5bd6400e
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/command/exec/exec.go
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package exec
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "os/exec"
+ "time"
+
+ "github.com/sirupsen/logrus"
+)
+
+func warnToLog(cmd *exec.Cmd, out []byte, scopedLog *logrus.Entry, err error) {
+ scopedLog.WithError(err).WithField("cmd", cmd.Args).Error("Command execution failed")
+ scanner := bufio.NewScanner(bytes.NewReader(out))
+ for scanner.Scan() {
+ scopedLog.Warn(scanner.Text())
+ }
+}
+
+// combinedOutput is the core implementation of catching deadline exceeded
+// options and logging errors.
+func combinedOutput(ctx context.Context, cmd *exec.Cmd, scopedLog *logrus.Entry, verbose bool) ([]byte, error) {
+ out, err := cmd.CombinedOutput()
+ if ctx.Err() != nil {
+ if !errors.Is(ctx.Err(), context.Canceled) {
+ scopedLog.WithError(err).WithField("cmd", cmd.Args).Error("Command execution failed")
+ }
+ return nil, fmt.Errorf("Command execution failed for %s: %w", cmd.Args, ctx.Err())
+ }
+ if err != nil && verbose {
+ warnToLog(cmd, out, scopedLog, err)
+ }
+ return out, err
+}
+
+// output is the equivalent to combinedOutput with only capturing stdout
+func output(ctx context.Context, cmd *exec.Cmd, scopedLog *logrus.Entry, verbose bool) ([]byte, error) {
+ out, err := cmd.Output()
+ if ctx.Err() != nil {
+ if !errors.Is(ctx.Err(), context.Canceled) {
+ scopedLog.WithError(err).WithField("cmd", cmd.Args).Error("Command execution failed")
+ }
+ return nil, fmt.Errorf("Command execution failed for %s: %w", cmd.Args, ctx.Err())
+ }
+ if err != nil {
+ var exitErr *exec.ExitError
+ if errors.As(err, &exitErr) {
+ err = fmt.Errorf("%w stderr=%q", exitErr, exitErr.Stderr)
+ }
+ if verbose {
+ warnToLog(cmd, out, scopedLog, err)
+ }
+ }
+ return out, err
+}
+
+// Cmd wraps exec.Cmd with a context to provide convenient execution of a
+// command with nice checking of the context timeout in the form:
+//
+// err := exec.Prog().WithTimeout(5*time.Second, myprog, myargs...).CombinedOutput(log, verbose)
+type Cmd struct {
+ *exec.Cmd
+ ctx context.Context
+ cancelFn func()
+}
+
+// CommandContext wraps exec.CommandContext to allow this package to be used as
+// a drop-in replacement for the standard exec library.
+func CommandContext(ctx context.Context, prog string, args ...string) *Cmd {
+ return &Cmd{
+ Cmd: exec.CommandContext(ctx, prog, args...),
+ ctx: ctx,
+ }
+}
+
+// WithTimeout creates a Cmd with a context that times out after the specified
+// duration.
+func WithTimeout(timeout time.Duration, prog string, args ...string) *Cmd {
+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ cmd := CommandContext(ctx, prog, args...)
+ cmd.cancelFn = cancel
+ return cmd
+}
+
+// WithCancel creates a Cmd with a context that can be cancelled by calling the
+// resulting Cancel() function.
+func WithCancel(ctx context.Context, prog string, args ...string) (*Cmd, context.CancelFunc) {
+ newCtx, cancel := context.WithCancel(ctx)
+ cmd := CommandContext(newCtx, prog, args...)
+ return cmd, cancel
+}
+
+// CombinedOutput runs the command and returns its combined standard output and
+// standard error. Unlike the standard library, if the context is exceeded, it
+// will return an error indicating so.
+//
+// Logs any errors that occur to the specified logger.
+func (c *Cmd) CombinedOutput(scopedLog *logrus.Entry, verbose bool) ([]byte, error) {
+ out, err := combinedOutput(c.ctx, c.Cmd, scopedLog, verbose)
+ if c.cancelFn != nil {
+ c.cancelFn()
+ }
+ return out, err
+}
+
+// Output runs the command and returns only standard output, but not the
+// standard error. Unlike the standard library, if the context is exceeded,
+// it will return an error indicating so.
+//
+// Logs any errors that occur to the specified logger.
+func (c *Cmd) Output(scopedLog *logrus.Entry, verbose bool) ([]byte, error) {
+ out, err := output(c.ctx, c.Cmd, scopedLog, verbose)
+ if c.cancelFn != nil {
+ c.cancelFn()
+ }
+ return out, err
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/common/const.go b/vendor/github.com/cilium/cilium/pkg/common/const.go
new file mode 100644
index 000000000..365d9ae28
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/common/const.go
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package common
+
+const (
+ // Miscellaneous dedicated constants
+
+ // CHeaderFileName is the name of the C header file for BPF programs for a
+ // particular endpoint.
+ CHeaderFileName = "ep_config.h"
+
+ // PossibleCPUSysfsPath is used to retrieve the number of CPUs for per-CPU maps.
+ PossibleCPUSysfsPath = "/sys/devices/system/cpu/possible"
+)
diff --git a/vendor/github.com/cilium/cilium/pkg/common/utils.go b/vendor/github.com/cilium/cilium/pkg/common/utils.go
new file mode 100644
index 000000000..b8a5a5d40
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/common/utils.go
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package common
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/sirupsen/logrus"
+
+ "github.com/cilium/cilium/pkg/safeio"
+)
+
+// C2GoArray transforms an hexadecimal string representation into a byte slice.
+// Example:
+// str := "0x12, 0xff, 0x0, 0x1"
+// fmt.Print(C2GoArray(str)) //`{0x12, 0xFF, 0x0, 0x01}`"
+func C2GoArray(str string) []byte {
+ ret := []byte{}
+
+ if str == "" {
+ return ret
+ }
+
+ hexStr := strings.Split(str, ", ")
+ for _, hexDigit := range hexStr {
+ strDigit := strings.TrimPrefix(hexDigit, "0x")
+ digitUint64, err := strconv.ParseUint(strDigit, 16, 8)
+ if err != nil {
+ return nil
+ }
+ ret = append(ret, byte(digitUint64))
+ }
+ return ret
+}
+
+// GoArray2C transforms a byte slice into its hexadecimal string representation.
+// Example:
+// array := []byte{0x12, 0xFF, 0x0, 0x01}
+// fmt.Print(GoArray2C(array)) // "{ 0x12, 0xff, 0x0, 0x1 }"
+func GoArray2C(array []byte) string {
+ return goArray2C(array, true)
+}
+
+// GoArray2CNoSpaces does the same as GoArray2C, but no spaces are used in
+// the final output.
+// Example:
+// array := []byte{0x12, 0xFF, 0x0, 0x01}
+// fmt.Print(GoArray2CNoSpaces(array)) // "{0x12,0xff,0x0,0x1}"
+func GoArray2CNoSpaces(array []byte) string {
+ return goArray2C(array, false)
+}
+
+func goArray2C(array []byte, space bool) string {
+ ret := ""
+ format := ",%#x"
+ if space {
+ format = ", %#x"
+ }
+
+ for i, e := range array {
+ if i == 0 {
+ ret = ret + fmt.Sprintf("%#x", e)
+ } else {
+ ret = ret + fmt.Sprintf(format, e)
+ }
+ }
+ return ret
+}
+
+// RequireRootPrivilege checks if the user running cmd is root. If not, it exits the program
+func RequireRootPrivilege(cmd string) {
+ if os.Getuid() != 0 {
+ fmt.Fprintf(os.Stderr, "Please run %q command(s) with root privileges.\n", cmd)
+ os.Exit(1)
+ }
+}
+
+// MapStringStructToSlice returns a slice with all keys of the given
+// map[string]struct{}
+func MapStringStructToSlice(m map[string]struct{}) []string {
+ s := make([]string, 0, len(m))
+ for k := range m {
+ s = append(s, k)
+ }
+ return s
+}
+
+// GetNumPossibleCPUs returns a total number of possible CPUS, i.e. CPUs that
+// have been allocated resources and can be brought online if they are present.
+// The number is retrieved by parsing /sys/devices/system/cpu/possible.
+//
+// See https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/include/linux/cpumask.h?h=v4.19#n50
+// for more details.
+func GetNumPossibleCPUs(log logrus.FieldLogger) int {
+ f, err := os.Open(PossibleCPUSysfsPath)
+ if err != nil {
+ log.WithError(err).Errorf("unable to open %q", PossibleCPUSysfsPath)
+ return 0
+ }
+ defer f.Close()
+
+ return getNumPossibleCPUsFromReader(log, f)
+}
+
+func getNumPossibleCPUsFromReader(log logrus.FieldLogger, r io.Reader) int {
+ out, err := safeio.ReadAllLimit(r, safeio.KB)
+ if err != nil {
+ log.WithError(err).Errorf("unable to read %q to get CPU count", PossibleCPUSysfsPath)
+ return 0
+ }
+
+ var start, end int
+ count := 0
+ for _, s := range strings.Split(string(out), ",") {
+ // Go's scanf will return an error if a format cannot be fully matched.
+ // So, just ignore it, as a partial match (e.g. when there is only one
+ // CPU) is expected.
+ n, err := fmt.Sscanf(s, "%d-%d", &start, &end)
+
+ switch n {
+ case 0:
+ log.WithError(err).Errorf("failed to scan %q to retrieve number of possible CPUs!", s)
+ return 0
+ case 1:
+ count++
+ default:
+ count += (end - start + 1)
+ }
+ }
+
+ return count
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/comparator/comparator.go b/vendor/github.com/cilium/cilium/pkg/comparator/comparator.go
new file mode 100644
index 000000000..d31b0194d
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/comparator/comparator.go
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package comparator
+
+import (
+ "github.com/kr/pretty"
+ "github.com/pmezard/go-difflib/difflib"
+)
+
+// Compare compares two interfaces and emits a unified diff as string
+func Compare(a, b interface{}) string {
+ return CompareWithNames(a, b, "a", "b")
+}
+
+// CompareWithNames compares two interfaces and emits a unified diff as string
+func CompareWithNames(a, b interface{}, nameA, nameB string) string {
+ stringA := pretty.Sprintf("%# v", a)
+ stringB := pretty.Sprintf("%# v", b)
+ diff := difflib.UnifiedDiff{
+ A: difflib.SplitLines(stringA),
+ B: difflib.SplitLines(stringB),
+ FromFile: nameA,
+ ToFile: nameB,
+ Context: 32,
+ }
+
+ out, err := difflib.GetUnifiedDiffString(diff)
+ if err != nil {
+ return err.Error()
+ }
+ return "Unified diff:\n" + out
+}
+
+// MapStringEquals returns true if both maps are equal.
+func MapStringEquals(m1, m2 map[string]string) bool {
+ switch {
+ case m1 == nil && m2 == nil:
+ return true
+ case m1 == nil && m2 != nil,
+ m1 != nil && m2 == nil,
+ len(m1) != len(m2):
+ return false
+ }
+ for k1, v1 := range m1 {
+ if v2, ok := m2[k1]; !ok || v2 != v1 {
+ return false
+ }
+ }
+ return true
+}
+
+// MapBoolEquals returns true if both maps are equal.
+func MapBoolEquals(m1, m2 map[string]bool) bool {
+ switch {
+ case m1 == nil && m2 == nil:
+ return true
+ case m1 == nil && m2 != nil,
+ m1 != nil && m2 == nil,
+ len(m1) != len(m2):
+ return false
+ }
+ for k1, v1 := range m1 {
+ if v2, ok := m2[k1]; !ok || v2 != v1 {
+ return false
+ }
+ }
+ return true
+}
+
+// MapStringEqualsIgnoreKeys returns true if both maps have the same values for
+// the keys that are not present in the 'ignoreKeys'.
+func MapStringEqualsIgnoreKeys(m1, m2 map[string]string, ignoreKeys []string) bool {
+ switch {
+ case m1 == nil && m2 == nil:
+ return true
+ case m1 == nil && m2 != nil,
+ m1 != nil && m2 == nil:
+ return false
+ }
+ ignoredM1 := 0
+ for k1, v1 := range m1 {
+ var ignore bool
+ for _, ig := range ignoreKeys {
+ if k1 == ig {
+ ignore = true
+ break
+ }
+ }
+ if ignore {
+ ignoredM1++
+ continue
+ }
+ if v2, ok := m2[k1]; !ok || v2 != v1 {
+ return false
+ }
+ }
+
+ ignoredM2 := 0
+ for _, ig := range ignoreKeys {
+ if _, ok := m2[ig]; ok {
+ ignoredM2++
+ }
+ }
+ return len(m1)-ignoredM1 == len(m2)-ignoredM2
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/components/components.go b/vendor/github.com/cilium/cilium/pkg/components/components.go
new file mode 100644
index 000000000..38f483a07
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/components/components.go
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package components
+
+import (
+ "os"
+ "strings"
+)
+
+const (
+ // CiliumAgentName is the name of cilium-agent (daemon) process name.
+ CiliumAgentName = "cilium-agent"
+ // CiliumOperatortName is the name of cilium-operator process name.
+ CiliumOperatortName = "cilium-operator"
+ // CiliumDaemonTestName is the name of test binary for daemon package.
+ CiliumDaemonTestName = "cmd.test"
+)
+
+// IsCiliumAgent checks whether the current process is cilium-agent (daemon).
+func IsCiliumAgent() bool {
+ binaryName := os.Args[0]
+ return strings.HasSuffix(binaryName, CiliumAgentName) ||
+ strings.HasSuffix(binaryName, CiliumDaemonTestName)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/contexthelpers/context.go b/vendor/github.com/cilium/cilium/pkg/contexthelpers/context.go
new file mode 100644
index 000000000..64ebd0c44
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/contexthelpers/context.go
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package contexthelpers
+
+import (
+ "context"
+ "time"
+)
+
+type SuccessChan chan bool
+
+// NewConditionalTimeoutContext returns a context which is cancelled when
+// success is not reported within the specified timeout
+func NewConditionalTimeoutContext(ctx context.Context, timeout time.Duration) (context.Context, context.CancelFunc, SuccessChan) {
+ ch := make(SuccessChan, 1)
+ c, cancel := context.WithCancel(ctx)
+
+ go func() {
+ select {
+ case success := <-ch:
+ if !success {
+ cancel()
+ return
+ }
+ case <-time.After(timeout):
+ cancel()
+ }
+ }()
+
+ return c, cancel, ch
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/controller/cell.go b/vendor/github.com/cilium/cilium/pkg/controller/cell.go
new file mode 100644
index 000000000..53119459a
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/controller/cell.go
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package controller
+
+import (
+ "github.com/spf13/pflag"
+
+ "github.com/cilium/cilium/pkg/hive/cell"
+ "github.com/cilium/cilium/pkg/metrics"
+ "github.com/cilium/cilium/pkg/metrics/metric"
+)
+
+const (
+ controllerGroupMetrics = "controller-group-metrics"
+
+ // labelControllerGroupName is the label used
+ // to identify controller-specific metrics
+ labelControllerGroupName = "group_name"
+)
+
+var (
+ // GroupMetricEnabled is populated with the set of ControllerGroups for which metrics are enabled
+ groupMetricEnabled = map[string]bool{}
+
+ // GroupRuns is a Prometheus-compatible metric for Controller
+ // runs, labeled by completion status and Group name
+ GroupRuns = metrics.NoOpCounterVec
+)
+
+var Cell = cell.Module(
+ "controller",
+ "Controllers and Controller Lifecycle management",
+ cell.Config(Config{}),
+ cell.Metric(NewMetrics),
+ cell.Invoke(Init),
+)
+
+type Metrics struct {
+ ControllerGroupRuns metric.Vec[metric.Counter]
+}
+
+func NewMetrics() Metrics {
+ return Metrics{
+ ControllerGroupRuns: metric.NewCounterVec(metric.CounterOpts{
+ ConfigName: metrics.Namespace + "_controllers_group_runs_total",
+ Namespace: metrics.Namespace,
+ Name: "controllers_group_runs_total",
+ Help: "Number of times that a controller group was run, labeled by completion status and controller group name",
+ }, []string{labelControllerGroupName, metrics.LabelStatus}),
+ }
+}
+
+type Config struct {
+ // ControllerGroupMetrics is an option which specifies the set of ControllerGroups names
+ // for which metrics will be enabled. The special values 'all' and 'none' are supported.
+ ControllerGroupMetrics []string
+}
+
+func (cfg Config) Flags(flags *pflag.FlagSet) {
+ flags.StringSlice(controllerGroupMetrics, cfg.ControllerGroupMetrics,
+ "List of controller group names for which to to enable metrics. "+
+ "Accepts 'all' and 'none'. "+
+ "The set of controller group names available is not guaranteed to be stable between Cilium versions.")
+}
+
+func Init(cfg Config, m Metrics) {
+ // Initialize package-scoped references to Cell configuration
+ for _, name := range cfg.ControllerGroupMetrics {
+ groupMetricEnabled[name] = true
+ }
+
+ GroupRuns = m.ControllerGroupRuns
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/controller/controller.go b/vendor/github.com/cilium/cilium/pkg/controller/controller.go
new file mode 100644
index 000000000..27c8d9db8
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/controller/controller.go
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package controller
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math"
+ "time"
+
+ "github.com/sirupsen/logrus"
+
+ "github.com/cilium/cilium/pkg/inctimer"
+ "github.com/cilium/cilium/pkg/lock"
+ "github.com/cilium/cilium/pkg/metrics"
+)
+
+const (
+ success = "success"
+ failure = "failure"
+
+ // special Group "names" for metrics config
+ allControllerMetricsEnabled = "all"
+ noControllerMetricsEnabled = "none"
+)
+
+// ControllerFunc is a function that the controller runs. This type is used for
+// DoFunc and StopFunc.
+type ControllerFunc func(ctx context.Context) error
+
+// ExitReason is a returnable type from DoFunc that causes the
+// controller to exit. This reason is recorded in the controller's status. The
+// controller is not removed from any manager.
+// Construct one with NewExitReason("a reason")
+type ExitReason struct {
+ // This is constructed in this odd way because the type assertion in
+ // runController didn't work otherwise.
+ error
+}
+
+// NewExitReason returns a new ExitReason
+func NewExitReason(reason string) ExitReason {
+ return ExitReason{errors.New(reason)}
+}
+
+// Group contains metadata about a group of controllers
+type Group struct {
+ // Name of the controller group.
+ //
+ // This name MUST NOT be dynamically generated based on
+ // resource identifier in order to limit metrics cardinality.
+ Name string
+}
+
+func NewGroup(name string) Group {
+ return Group{Name: name}
+}
+
+// ControllerParams contains all parameters of a controller
+type ControllerParams struct {
+ // Group is used for aggregate metrics collection.
+ // The Group.Name must NOT be dynamically generated from a
+ // resource identifier in order to limit metrics cardinality.
+ Group Group
+
+ // DoFunc is the function that will be run until it succeeds and/or
+ // using the interval RunInterval if not 0.
+ // An unset DoFunc is an error and will be logged as one.
+ DoFunc ControllerFunc
+
+ // CancelDoFuncOnUpdate when set to true cancels the controller context
+ // (the DoFunc) to allow quick termination of controller
+ CancelDoFuncOnUpdate bool
+
+ // StopFunc is called when the controller stops. It is intended to run any
+ // clean-up tasks for the controller (e.g. deallocate/release resources)
+ // It is guaranteed that DoFunc is called at least once before StopFunc is
+ // called.
+ // An unset StopFunc is not an error (and will be a no-op)
+ // Note: Since this occurs on controller exit, error counts and tracking may
+ // not be checked after StopFunc is run.
+ StopFunc ControllerFunc
+
+ // If set to any other value than 0, will cause DoFunc to be run in the
+ // specified interval. The interval starts from when the DoFunc has
+ // returned last
+ RunInterval time.Duration
+
+ // If set to any other value than 0, will cap the error retry interval
+ // to the specified interval.
+ MaxRetryInterval time.Duration
+
+ // ErrorRetryBaseDuration is the initial time to wait to run DoFunc
+ // again on return of an error. On each consecutive error, this value
+ // is multiplied by the number of consecutive errors to provide a
+ // constant back off. The default is 1s.
+ ErrorRetryBaseDuration time.Duration
+
+ // NoErrorRetry when set to true, disabled retries on errors
+ NoErrorRetry bool
+
+ Context context.Context
+}
+
+// undefinedDoFunc is used when no DoFunc is set. controller.DoFunc is set to this
+// when the controller is incorrectly initialised.
+func undefinedDoFunc(name string) error {
+ return fmt.Errorf("controller %s DoFunc is nil", name)
+}
+
+// NoopFunc is a no-op placeholder for DoFunc & StopFunc.
+// It is automatically used when StopFunc is undefined, and can be used as a
+// DoFunc stub when the controller should only run StopFunc.
+func NoopFunc(ctx context.Context) error {
+ return nil
+}
+
+// isGroupMetricEnabled returns true if metrics are enabled for the Group
+//
+// The controller metrics config option is used to determine
+// if "all", "none" (takes precedence over "all"), or the
+// given set of Group names should be enabled.
+//
+// If no controller metrics config option was provided,
+// only then is the DefaultMetricsEnabled field used.
+func isGroupMetricEnabled(g Group) bool {
+ var metricsEnabled = groupMetricEnabled
+ if metricsEnabled == nil {
+ // There is currently no guarantee that a caller of this function
+ // has initialized the configuration map using the hive cell.
+ return false
+ }
+
+ if metricsEnabled[noControllerMetricsEnabled] {
+ // "none" takes precedence over "all"
+ return false
+ } else if metricsEnabled[allControllerMetricsEnabled] {
+ return true
+ } else {
+ return metricsEnabled[g.Name]
+ }
+}
+
+// Controller is a simple pattern that allows to perform the following
+// tasks:
+// - Run an operation in the background and retry until it succeeds
+// - Perform a regular sync operation in the background
+//
+// A controller has configurable retry intervals and will collect statistics
+// on number of successful runs, number of failures, last error message,
+// and last error timestamp.
+//
+// Controllers have a name and are tied to a Manager. The manager is typically
+// bound to higher level objects such as endpoint. These higher level objects
+// can then run multiple controllers to perform async tasks such as:
+// - Annotating k8s resources with values
+// - Synchronizing an object with the kvstore
+// - Any other async operation to may fail and require retries
+//
+// Embedding the Manager into higher level resources allows to bind controllers
+// to the lifetime of that object. Controllers also have a UUID to allow
+// correlating all log messages of a controller instance.
+//
+// Guidelines to writing controllers:
+// - Make sure that the task the controller performs is done in an atomic
+// fashion, e.g. if a controller modifies a resource in multiple steps, an
+// intermediate manipulation operation failing should not leave behind
+// an inconsistent state. This can typically be achieved by locking the
+// resource and rolling back or by using transactions.
+// - Controllers typically act on behalf of a higher level object such as an
+// endpoint. The controller must ensure that the higher level object is
+// properly locked when accessing any fields.
+// - Controllers run asynchronously in the background, it is the responsibility
+// of the controller to be aware of the lifecycle of the owning higher level
+// object. This is typically achieved by removing all controllers when the
+// owner dies. It is the responsibility of the owner to either lock the owner
+// in a way that will delay destruction throughout the controller run or to
+// check for the destruction throughout the run.
+type controller struct {
+ // Constant after creation, safe to access without locking
+ group Group
+ name string
+ uuid string
+ logger *logrus.Entry
+
+ // Channels written to and/or closed by the manager
+ stop chan struct{}
+ update chan ControllerParams
+ trigger chan struct{}
+
+ // terminated is closed by the controller goroutine when it terminates
+ terminated chan struct{}
+
+ // Manipulated by the controller, read by the Manager, requires locking
+ mutex lock.RWMutex
+ successCount int
+ lastSuccessStamp time.Time
+ failureCount int
+ consecutiveErrors int
+ lastError error
+ lastErrorStamp time.Time
+ lastDuration time.Duration
+}
+
+// GetSuccessCount returns the number of successful controller runs
+func (c *controller) GetSuccessCount() int {
+ c.mutex.RLock()
+ defer c.mutex.RUnlock()
+
+ return c.successCount
+}
+
+// GetFailureCount returns the number of failed controller runs
+func (c *controller) GetFailureCount() int {
+ c.mutex.RLock()
+ defer c.mutex.RUnlock()
+
+ return c.failureCount
+}
+
+// GetLastError returns the last error returned
+func (c *controller) GetLastError() error {
+ c.mutex.RLock()
+ defer c.mutex.RUnlock()
+
+ return c.lastError
+}
+
+// GetLastErrorTimestamp returns the timestamp of the last error returned
+func (c *controller) GetLastErrorTimestamp() time.Time {
+ c.mutex.RLock()
+ defer c.mutex.RUnlock()
+
+ return c.lastErrorStamp
+}
+
+func (c *controller) runController(params ControllerParams) {
+ errorRetries := 1
+
+ runTimer, timerDone := inctimer.New()
+ defer timerDone()
+
+ for {
+ var err error
+
+ interval := params.RunInterval
+
+ start := time.Now()
+ err = params.DoFunc(params.Context)
+ duration := time.Since(start)
+
+ c.mutex.Lock()
+ c.lastDuration = duration
+ c.getLogger().Debug("Controller func execution time: ", c.lastDuration)
+
+ if err != nil {
+ if params.Context.Err() != nil {
+ // The controller's context was canceled. Let's wait for the
+ // next controller update (or stop).
+ err = NewExitReason("controller context canceled")
+ }
+
+ switch err := err.(type) {
+ case ExitReason:
+ // This is actually not an error case, but it causes an exit
+ c.recordSuccess()
+ c.lastError = err // This will be shown in the controller status
+
+ // Don't exit the goroutine, since that only happens when the
+ // controller is explicitly stopped. Instead, just wait for
+ // the next update.
+ c.getLogger().Debug("Controller run succeeded; waiting for next controller update or stop")
+ interval = time.Duration(math.MaxInt64)
+
+ default:
+ c.getLogger().WithField(fieldConsecutiveErrors, errorRetries).
+ WithError(err).Debug("Controller run failed")
+ c.recordError(err)
+
+ if !params.NoErrorRetry {
+ if params.ErrorRetryBaseDuration != time.Duration(0) {
+ interval = time.Duration(errorRetries) * params.ErrorRetryBaseDuration
+ } else {
+ interval = time.Duration(errorRetries) * time.Second
+ }
+
+ if params.MaxRetryInterval > 0 && interval > params.MaxRetryInterval {
+ c.getLogger().WithFields(logrus.Fields{
+ "calculatedInterval": interval,
+ "maxAllowedInterval": params.MaxRetryInterval,
+ }).Debug("Cap retry interval to max allowed value")
+ interval = params.MaxRetryInterval
+ }
+
+ errorRetries++
+ }
+ }
+ } else {
+ c.recordSuccess()
+
+ // reset error retries after successful attempt
+ errorRetries = 1
+
+ // If no run interval is specified, no further updates
+ // are required.
+ if interval == time.Duration(0) {
+ // Don't exit the goroutine, since that only happens when the
+ // controller is explicitly stopped. Instead, just wait for
+ // the next update.
+ c.getLogger().Debug("Controller run succeeded; waiting for next controller update or stop")
+ interval = time.Duration(math.MaxInt64)
+ }
+ }
+
+ c.mutex.Unlock()
+
+ select {
+ case <-c.stop:
+ goto shutdown
+
+ case params = <-c.update:
+ // update channel is never closed
+ case <-runTimer.After(interval):
+ // timer channel is not yet closed
+ case <-c.trigger:
+ // trigger channel is never closed
+ }
+
+ // If we receive a signal on multiple channels golang will pick one randomly.
+ // This select will make sure we don't execute the controller
+ // while we are shutting down.
+ select {
+ case <-c.stop:
+ goto shutdown
+ default:
+ }
+ }
+
+shutdown:
+ c.getLogger().Debug("Shutting down controller")
+
+ if err := params.StopFunc(context.TODO()); err != nil {
+ c.mutex.Lock()
+ c.recordError(err)
+ c.mutex.Unlock()
+ c.getLogger().WithField(fieldConsecutiveErrors, errorRetries).
+ WithError(err).Warn("Error on Controller stop")
+ }
+
+ close(c.terminated)
+}
+
+// getLogger returns a logrus object with controllerName and UUID fields.
+func (c *controller) getLogger() *logrus.Entry {
+ if c.logger == nil {
+ c.logger = log.WithFields(logrus.Fields{
+ fieldControllerName: c.name,
+ fieldUUID: c.uuid,
+ })
+ }
+
+ return c.logger
+}
+
+// recordError updates all statistic collection variables on error
+// c.mutex must be held.
+func (c *controller) recordError(err error) {
+ c.lastError = err
+ c.lastErrorStamp = time.Now()
+ c.failureCount++
+ c.consecutiveErrors++
+
+ metrics.ControllerRuns.WithLabelValues(failure).Inc()
+ if isGroupMetricEnabled(c.group) {
+ GroupRuns.WithLabelValues(c.group.Name, failure).Inc()
+ }
+ metrics.ControllerRunsDuration.WithLabelValues(failure).Observe(c.lastDuration.Seconds())
+}
+
+// recordSuccess updates all statistic collection variables on success
+// c.mutex must be held.
+func (c *controller) recordSuccess() {
+ c.lastError = nil
+ c.lastSuccessStamp = time.Now()
+ c.successCount++
+ c.consecutiveErrors = 0
+
+ metrics.ControllerRuns.WithLabelValues(success).Inc()
+ if isGroupMetricEnabled(c.group) {
+ GroupRuns.WithLabelValues(c.group.Name, success).Inc()
+ }
+ metrics.ControllerRunsDuration.WithLabelValues(success).Observe(c.lastDuration.Seconds())
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/controller/doc.go b/vendor/github.com/cilium/cilium/pkg/controller/doc.go
new file mode 100644
index 000000000..0bab327fd
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/controller/doc.go
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Package controller provides a simple pattern for async operations that
+// require retries and/or regular intervals.
+package controller
diff --git a/vendor/github.com/cilium/cilium/pkg/controller/logfields.go b/vendor/github.com/cilium/cilium/pkg/controller/logfields.go
new file mode 100644
index 000000000..ff1446925
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/controller/logfields.go
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package controller
+
+import (
+ "github.com/cilium/cilium/pkg/logging"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+)
+
+// logging field definitions
+const (
+ // fieldControllerName is the name of the controller
+ fieldControllerName = "name"
+
+ // fieldUUID is the UUID of the controller
+ fieldUUID = "uuid"
+
+ // fieldConsecutiveErrors is the number of consecutive errors of a controller
+ fieldConsecutiveErrors = "consecutiveErrors"
+)
+
+var (
+ // log is the controller package logger object.
+ log = logging.DefaultLogger.WithField(logfields.LogSubsys, "controller")
+)
diff --git a/vendor/github.com/cilium/cilium/pkg/controller/manager.go b/vendor/github.com/cilium/cilium/pkg/controller/manager.go
new file mode 100644
index 000000000..e7c4ab615
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/controller/manager.go
@@ -0,0 +1,374 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package controller
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/go-openapi/strfmt"
+ "github.com/google/uuid"
+
+ "github.com/cilium/cilium/api/v1/models"
+ "github.com/cilium/cilium/pkg/lock"
+ "github.com/cilium/cilium/pkg/option"
+)
+
+var (
+ // globalStatus is the global status of all controllers
+ globalStatus = NewManager()
+)
+
+type controllerMap map[string]*managedController
+
+// Manager is a list of controllers
+type Manager struct {
+ controllers controllerMap
+ mutex lock.RWMutex
+}
+
+// NewManager allocates a new manager
+func NewManager() *Manager {
+ return &Manager{
+ controllers: controllerMap{},
+ }
+}
+
+// GetGlobalStatus returns the status of all controllers
+func GetGlobalStatus() models.ControllerStatuses {
+ return globalStatus.GetStatusModel()
+}
+
+// UpdateController installs or updates a controller in the
+// manager. A controller is primarily identified by its name.
+// If a controller with the name already exists, the controller
+// will be shut down and replaced with the provided controller.
+//
+// Updating a controller will cause the DoFunc to be run immediately regardless
+// of any previous conditions. It will also cause any statistics to be reset.
+func (m *Manager) UpdateController(name string, params ControllerParams) {
+ m.updateController(name, params)
+}
+
+func (m *Manager) updateController(name string, params ControllerParams) *managedController {
+ start := time.Now()
+
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+
+ if m.controllers == nil {
+ m.controllers = controllerMap{}
+ }
+
+ if params.Group.Name == "" {
+ log.Errorf(
+ "Controller initialized with unpopulated group information. " +
+ "Metrics will not be exported for this controller.")
+ }
+
+ ctrl, exists := m.controllers[name]
+ if exists {
+ ctrl.getLogger().Debug("Updating existing controller")
+ ctrl.updateParamsLocked(params)
+
+ // Notify the goroutine of the params update.
+ select {
+ case ctrl.update <- ctrl.params:
+ default:
+ }
+
+ ctrl.getLogger().Debug("Controller update time: ", time.Since(start))
+ } else {
+ return m.createControllerLocked(name, params)
+ }
+
+ return ctrl
+}
+
+func (m *Manager) createControllerLocked(name string, params ControllerParams) *managedController {
+ ctrl := &managedController{
+ controller: controller{
+ name: name,
+ group: params.Group,
+ uuid: uuid.New().String(),
+ stop: make(chan struct{}),
+ update: make(chan ControllerParams, 1),
+ trigger: make(chan struct{}, 1),
+ terminated: make(chan struct{}),
+ },
+ }
+ ctrl.updateParamsLocked(params)
+ ctrl.getLogger().Debug("Starting new controller")
+
+ m.controllers[ctrl.name] = ctrl
+
+ globalStatus.mutex.Lock()
+ globalStatus.controllers[ctrl.uuid] = ctrl
+ globalStatus.mutex.Unlock()
+
+ go ctrl.runController(ctrl.params)
+ return ctrl
+}
+
+// CreateController installs a new controller in the
+// manager. If a controller with the name already exists
+// this method returns false without triggering, otherwise
+// creates the controller and runs it immediately.
+func (m *Manager) CreateController(name string, params ControllerParams) bool {
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+
+ if m.controllers != nil {
+ if _, exists := m.controllers[name]; exists {
+ return false
+ }
+ } else {
+ m.controllers = controllerMap{}
+ }
+ m.createControllerLocked(name, params)
+ return true
+}
+
+func (m *Manager) removeController(ctrl *managedController) {
+ ctrl.stopController()
+ delete(m.controllers, ctrl.name)
+
+ globalStatus.mutex.Lock()
+ delete(globalStatus.controllers, ctrl.uuid)
+ globalStatus.mutex.Unlock()
+
+ ctrl.getLogger().Debug("Removed controller")
+}
+
+func (m *Manager) lookup(name string) *managedController {
+ m.mutex.RLock()
+ defer m.mutex.RUnlock()
+
+ if c, ok := m.controllers[name]; ok {
+ return c
+ }
+
+ return nil
+}
+
+func (m *Manager) removeAndReturnController(name string) (*managedController, error) {
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+
+ if m.controllers == nil {
+ return nil, fmt.Errorf("empty controller map")
+ }
+
+ oldCtrl, ok := m.controllers[name]
+ if !ok {
+ return nil, fmt.Errorf("unable to find controller %s", name)
+ }
+
+ m.removeController(oldCtrl)
+
+ return oldCtrl, nil
+}
+
+// RemoveController stops and removes a controller from the manager. If DoFunc
+// is currently running, DoFunc is allowed to complete in the background.
+func (m *Manager) RemoveController(name string) error {
+ _, err := m.removeAndReturnController(name)
+ return err
+}
+
+// RemoveControllerAndWait stops and removes a controller using
+// RemoveController() and then waits for it to run to completion.
+func (m *Manager) RemoveControllerAndWait(name string) error {
+ oldCtrl, err := m.removeAndReturnController(name)
+ if err == nil {
+ <-oldCtrl.terminated
+ }
+
+ return err
+}
+
+func (m *Manager) removeAll() []*managedController {
+ ctrls := []*managedController{}
+
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+
+ if m.controllers == nil {
+ return ctrls
+ }
+
+ for _, ctrl := range m.controllers {
+ m.removeController(ctrl)
+ ctrls = append(ctrls, ctrl)
+ }
+
+ return ctrls
+}
+
+// RemoveAll stops and removes all controllers of the manager
+func (m *Manager) RemoveAll() {
+ m.removeAll()
+}
+
+// RemoveAllAndWait stops and removes all controllers of the manager and then
+// waits for all controllers to exit
+func (m *Manager) RemoveAllAndWait() {
+ ctrls := m.removeAll()
+ for _, ctrl := range ctrls {
+ <-ctrl.terminated
+ }
+}
+
+// GetStatusModel returns the status of all controllers as models.ControllerStatuses
+func (m *Manager) GetStatusModel() models.ControllerStatuses {
+ // Create a copy of pointers to current controller so we can unlock the
+ // manager mutex quickly again
+ controllers := controllerMap{}
+ m.mutex.RLock()
+ for key, c := range m.controllers {
+ controllers[key] = c
+ }
+ m.mutex.RUnlock()
+
+ statuses := models.ControllerStatuses{}
+ for _, c := range controllers {
+ statuses = append(statuses, c.GetStatusModel())
+ }
+
+ return statuses
+}
+
+// TriggerController triggers the controller with the specified name.
+func (m *Manager) TriggerController(name string) {
+ ctrl := m.lookup(name)
+ if ctrl == nil {
+ return
+ }
+
+ select {
+ case ctrl.trigger <- struct{}{}:
+ default:
+ }
+}
+
+// FakeManager returns a fake controller manager with the specified number of
+// failing controllers. The returned manager is identical in every regard except
+// for internal pointers.
+// Used for testing only.
+func FakeManager(failingControllers int) *Manager {
+ m := &Manager{
+ controllers: controllerMap{},
+ }
+
+ for i := 0; i < failingControllers; i++ {
+ ctrl := &managedController{
+ controller: controller{
+ name: fmt.Sprintf("controller-%d", i),
+ uuid: fmt.Sprintf("%d", i),
+ stop: make(chan struct{}),
+ update: make(chan ControllerParams, 1),
+ trigger: make(chan struct{}, 1),
+ terminated: make(chan struct{}),
+ lastError: fmt.Errorf("controller failed"),
+ failureCount: 1,
+ consecutiveErrors: 1,
+ },
+ }
+
+ ctrl.params.Context, ctrl.cancelDoFunc = context.WithCancel(context.Background())
+ m.controllers[ctrl.name] = ctrl
+ }
+
+ return m
+}
+
+type managedController struct {
+ controller
+
+ params ControllerParams
+ cancelDoFunc context.CancelFunc
+}
+
+// updateParamsLocked sanitizes and sets the controller's parameters.
+//
+// If the RunInterval exceeds ControllerMaxInterval, it will be capped.
+//
+// Manager's mutex must be held
+func (c *managedController) updateParamsLocked(params ControllerParams) {
+ // ensure the callbacks are valid
+ if params.DoFunc == nil {
+ params.DoFunc = func(ctx context.Context) error {
+ return undefinedDoFunc(c.name)
+ }
+ }
+ if params.StopFunc == nil {
+ params.StopFunc = NoopFunc
+ }
+
+ // Enforce max controller interval
+ maxInterval := time.Duration(option.Config.MaxControllerInterval) * time.Second
+ if maxInterval > 0 && params.RunInterval > maxInterval {
+ c.getLogger().Infof("Limiting interval to %s", maxInterval)
+ params.RunInterval = maxInterval
+ }
+
+ // Save current context on update if not canceling
+ ctx := c.params.Context
+ // Check if the current context needs to be cancelled
+ if c.params.CancelDoFuncOnUpdate && c.cancelDoFunc != nil {
+ c.cancelDoFunc()
+ c.params.Context = nil
+ }
+
+ // (re)set the context as the previous might have been cancelled
+ if c.params.Context == nil {
+ if params.Context == nil {
+ ctx, c.cancelDoFunc = context.WithCancel(context.Background())
+ } else {
+ ctx, c.cancelDoFunc = context.WithCancel(params.Context)
+ }
+ }
+
+ c.params = params
+ c.params.Context = ctx
+}
+
+func (c *managedController) stopController() {
+ if c.cancelDoFunc != nil {
+ c.cancelDoFunc()
+ }
+
+ close(c.stop)
+}
+
+// GetStatusModel returns a models.ControllerStatus representing the
+// controller's configuration & status
+func (c *managedController) GetStatusModel() *models.ControllerStatus {
+ c.mutex.RLock()
+ defer c.mutex.RUnlock()
+
+ status := &models.ControllerStatus{
+ Name: c.name,
+ UUID: strfmt.UUID(c.uuid),
+ Configuration: &models.ControllerStatusConfiguration{
+ ErrorRetry: !c.params.NoErrorRetry,
+ ErrorRetryBase: strfmt.Duration(c.params.ErrorRetryBaseDuration),
+ Interval: strfmt.Duration(c.params.RunInterval),
+ },
+ Status: &models.ControllerStatusStatus{
+ SuccessCount: int64(c.successCount),
+ LastSuccessTimestamp: strfmt.DateTime(c.lastSuccessStamp),
+ FailureCount: int64(c.failureCount),
+ LastFailureTimestamp: strfmt.DateTime(c.lastErrorStamp),
+ ConsecutiveFailureCount: int64(c.consecutiveErrors),
+ },
+ }
+
+ if c.lastError != nil {
+ status.Status.LastFailureMsg = c.lastError.Error()
+ }
+
+ return status
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/crypto/certificatemanager/certificate_manager.go b/vendor/github.com/cilium/cilium/pkg/crypto/certificatemanager/certificate_manager.go
new file mode 100644
index 000000000..e2378e6c1
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/crypto/certificatemanager/certificate_manager.go
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package certificatemanager
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/cilium/cilium/pkg/hive/cell"
+ k8sClient "github.com/cilium/cilium/pkg/k8s/client"
+ "github.com/cilium/cilium/pkg/policy/api"
+
+ "github.com/spf13/pflag"
+)
+
+var Cell = cell.Module(
+ "certificate-manager",
+ "Provides TLS certificates and secrets",
+
+ cell.Provide(NewManager),
+
+ cell.Config(defaultManagerConfig),
+)
+
+type CertificateManager interface {
+ GetTLSContext(ctx context.Context, tlsCtx *api.TLSContext, ns string) (ca, public, private string, err error)
+}
+
+type SecretManager interface {
+ GetSecrets(ctx context.Context, secret *api.Secret, ns string) (string, map[string][]byte, error)
+ GetSecretString(ctx context.Context, secret *api.Secret, ns string) (string, error)
+}
+
+var defaultManagerConfig = managerConfig{
+ CertificatesDirectory: "/var/run/cilium/certs",
+}
+
+type managerConfig struct {
+ // CertificatesDirectory is the root directory to be used by cilium to find
+ // certificates locally.
+ CertificatesDirectory string
+}
+
+func (mc managerConfig) Flags(flags *pflag.FlagSet) {
+ flags.String("certificates-directory", mc.CertificatesDirectory, "Root directory to find certificates specified in L7 TLS policy enforcement")
+}
+
+// Manager will manage the way certificates are retrieved based in the given
+// k8sClient and rootPath.
+type manager struct {
+ rootPath string
+ k8sClient k8sClient.Clientset
+}
+
+// NewManager returns a new manager.
+func NewManager(cfg managerConfig, clientset k8sClient.Clientset) (CertificateManager, SecretManager) {
+ m := &manager{
+ rootPath: cfg.CertificatesDirectory,
+ k8sClient: clientset,
+ }
+
+ return m, m
+}
+
+// GetSecrets returns either local or k8s secrets, giving precedence to local secrets if configured.
+// The 'ns' parameter is used as the secret namespace if 'secret.Namespace' is an empty string.
+func (m *manager) GetSecrets(ctx context.Context, secret *api.Secret, ns string) (string, map[string][]byte, error) {
+ if secret == nil {
+ return "", nil, fmt.Errorf("Secret must not be nil")
+ }
+
+ if secret.Namespace != "" {
+ ns = secret.Namespace
+ }
+
+ if secret.Name == "" {
+ return ns, nil, fmt.Errorf("Missing Secret name")
+ }
+ nsName := filepath.Join(ns, secret.Name)
+
+ // Give priority to local secrets.
+ // K8s API request is only done if the local secret directory can't be read!
+ certPath := filepath.Join(m.rootPath, nsName)
+ files, ioErr := os.ReadDir(certPath)
+ if ioErr == nil {
+ secrets := make(map[string][]byte, len(files))
+ for _, file := range files {
+ var bytes []byte
+
+ path := filepath.Join(certPath, file.Name())
+ bytes, ioErr = os.ReadFile(path)
+ if ioErr == nil {
+ secrets[file.Name()] = bytes
+ }
+ }
+ // Return the (latest) error only if no secrets were found
+ if len(secrets) == 0 && ioErr != nil {
+ return nsName, nil, ioErr
+ }
+ return nsName, secrets, nil
+ }
+ secrets, err := m.k8sClient.GetSecrets(ctx, ns, secret.Name)
+ return nsName, secrets, err
+}
+
+const (
+ caDefaultName = "ca.crt"
+ publicDefaultName = "tls.crt"
+ privateDefaultName = "tls.key"
+)
+
+// GetTLSContext returns a new ca, public and private certificates found based
+// in the given api.TLSContext.
+func (m *manager) GetTLSContext(ctx context.Context, tlsCtx *api.TLSContext, ns string) (ca, public, private string, err error) {
+ name, secrets, err := m.GetSecrets(ctx, tlsCtx.Secret, ns)
+ if err != nil {
+ return "", "", "", err
+ }
+
+ caName := caDefaultName
+ if tlsCtx.TrustedCA != "" {
+ caName = tlsCtx.TrustedCA
+ }
+ caBytes, ok := secrets[caName]
+ if ok {
+ ca = string(caBytes)
+ } else if tlsCtx.TrustedCA != "" {
+ return "", "", "", fmt.Errorf("Trusted CA %s not found in secret %s", caName, name)
+ }
+
+ publicName := publicDefaultName
+ if tlsCtx.Certificate != "" {
+ publicName = tlsCtx.Certificate
+ }
+ publicBytes, ok := secrets[publicName]
+ if ok {
+ public = string(publicBytes)
+ } else if tlsCtx.Certificate != "" {
+ return "", "", "", fmt.Errorf("Certificate %s not found in secret %s", publicName, name)
+ }
+
+ privateName := privateDefaultName
+ if tlsCtx.PrivateKey != "" {
+ privateName = tlsCtx.PrivateKey
+ }
+ privateBytes, ok := secrets[privateName]
+ if ok {
+ private = string(privateBytes)
+ } else if tlsCtx.PrivateKey != "" {
+ return "", "", "", fmt.Errorf("Private Key %s not found in secret %s", privateName, name)
+ }
+
+ if caBytes == nil && publicBytes == nil && privateBytes == nil {
+ return "", "", "", fmt.Errorf("TLS certificates not found in secret %s ", name)
+ }
+
+ return ca, public, private, nil
+}
+
+// GetSecretString returns a secret string stored in a k8s secret
+func (m *manager) GetSecretString(ctx context.Context, secret *api.Secret, ns string) (string, error) {
+ name, secrets, err := m.GetSecrets(ctx, secret, ns)
+ if err != nil {
+ return "", err
+ }
+
+ if len(secrets) == 1 {
+ // get the lone item by looping into the map
+ for _, value := range secrets {
+ return string(value), nil
+ }
+ }
+ return "", fmt.Errorf("Secret %s must have exactly one item", name)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/attach_cgroup.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/attach_cgroup.go
new file mode 100644
index 000000000..33ca88426
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/attach_cgroup.go
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package probes
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/link"
+ "golang.org/x/sys/unix"
+)
+
+// HaveAttachCgroup returns nil if the kernel is compiled with
+// CONFIG_CGROUP_BPF.
+//
+// It's only an approximation and doesn't execute a successful cgroup attachment
+// under the hood. If any unexpected errors are encountered, the original error
+// is returned.
+func HaveAttachCgroup() error {
+ attachCgroupOnce.Do(func() {
+ attachCgroupResult = haveAttachCgroup()
+ })
+
+ return attachCgroupResult
+}
+
+func haveAttachCgroup() error {
+ // Load known-good program supported by the earliest kernels with cgroup
+ // support.
+ spec := &ebpf.ProgramSpec{
+ Type: ebpf.CGroupSKB,
+ AttachType: ebpf.AttachCGroupInetIngress,
+ Instructions: asm.Instructions{
+ asm.LoadImm(asm.R0, 0, asm.DWord),
+ asm.Return(),
+ },
+ }
+
+ p, err := ebpf.NewProgramWithOptions(spec, ebpf.ProgramOptions{
+ LogDisabled: true,
+ })
+ if err != nil {
+ return fmt.Errorf("create cgroup program: %w: %w", err, ebpf.ErrNotSupported)
+ }
+ defer p.Close()
+
+ // Attaching to a non-cgroup node should result in EBADF when creating the
+ // link, compared to EINVAL if the kernel does not support or was compiled
+ // without CONFIG_CGROUP_BPF.
+ _, err = link.AttachCgroup(link.CgroupOptions{Path: "/dev/null", Program: p, Attach: spec.AttachType})
+ if errors.Is(err, unix.EBADF) {
+ // The kernel checked the given file descriptor from within the cgroup prog
+ // attach handler. Assume it supports attaching cgroup progs.
+ return nil
+ }
+ if err != nil {
+ // Preserve the original error in the error string. Needs Go 1.20.
+ return fmt.Errorf("link cgroup program to /dev/null: %w: %w", err, ebpf.ErrNotSupported)
+ }
+
+ return errors.New("attaching prog to /dev/null did not result in error")
+}
+
+var attachCgroupOnce sync.Once
+var attachCgroupResult error
diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/attach_type.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/attach_type.go
new file mode 100644
index 000000000..5ce0c0aa9
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/attach_type.go
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package probes
+
+import (
+ "errors"
+
+ "golang.org/x/sys/unix"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/features"
+
+ "github.com/cilium/cilium/pkg/lock"
+)
+
+// HaveAttachType returns nil if the given program/attach type combination is
+// supported by the underlying kernel. Returns ebpf.ErrNotSupported if loading a
+// program with the given Program/AttachType fails. If the probe is inconclusive
+// due to an unrecognized return code, the original error is returned.
+//
+// Note that program types that don't use attach types will silently succeed if
+// an attach type is specified.
+//
+// Probe results are cached by the package and shouldn't be memoized by the
+// caller.
+func HaveAttachType(pt ebpf.ProgramType, at ebpf.AttachType) (err error) {
+ if err := features.HaveProgramType(pt); err != nil {
+ return err
+ }
+
+ attachProbesMu.Lock()
+ defer attachProbesMu.Unlock()
+ if err, ok := attachProbes[attachProbe{pt, at}]; ok {
+ return err
+ }
+
+ defer func() {
+ // Closes over named return variable err to cache any returned errors.
+ attachProbes[attachProbe{pt, at}] = err
+ }()
+
+ spec := &ebpf.ProgramSpec{
+ Type: pt,
+ AttachType: at,
+ Instructions: asm.Instructions{
+ // recvmsg and peername require a return value of 1, use it for all probes.
+ asm.LoadImm(asm.R0, 1, asm.DWord),
+ asm.Return(),
+ },
+ }
+
+ prog, err := ebpf.NewProgramWithOptions(spec, ebpf.ProgramOptions{
+ LogDisabled: true,
+ })
+ if err == nil {
+ prog.Close()
+ }
+
+ // EINVAL occurs when attempting to create a program with an unknown type.
+ // E2BIG occurs when ProgLoadAttr contains non-zero bytes past the end
+ // of the struct known by the running kernel, meaning the kernel is too old
+ // to support the given prog type.
+ if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.E2BIG) {
+ err = ebpf.ErrNotSupported
+ }
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+type attachProbe struct {
+ pt ebpf.ProgramType
+ at ebpf.AttachType
+}
+
+var attachProbesMu lock.Mutex
+var attachProbes map[attachProbe]error = make(map[attachProbe]error)
diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/doc.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/doc.go
new file mode 100644
index 000000000..285c8851d
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/doc.go
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Package probes provides BPF features checks based on bpftool.
+package probes
diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/kernel_hz.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/kernel_hz.go
new file mode 100644
index 000000000..c815eb729
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/kernel_hz.go
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package probes
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "time"
+)
+
+// Available CONFIG_HZ values, sorted from highest to lowest.
+var hzValues = []uint16{1000, 300, 250, 100}
+
+// KernelHZ attempts to estimate the kernel's CONFIG_HZ compile-time value by
+// making snapshots of the kernel timestamp with a time interval in between.
+//
+// Blocks for at least 100ms while the measurement is in progress. Can block
+// significantly longer under some hypervisors like VirtualBox due to buggy
+// clocks, interrupt coalescing and low timer resolution.
+func KernelHZ() (uint16, error) {
+ f, err := os.Open("/proc/schedstat")
+ if err != nil {
+ return 0, err
+ }
+ defer f.Close()
+
+ // Measure the kernel timestamp at least 100ms apart, giving kernel timer and
+ // wall clock ample opportunity to advance for adequate sample size.
+ j1, err := readSchedstat(f)
+ if err != nil {
+ return 0, err
+ }
+
+ // On some platforms, this can put the goroutine to sleep for significantly
+ // longer than 100ms. Do not rely on readings being anywhere near 100ms apart.
+ time.Sleep(time.Millisecond * 100)
+
+ j2, err := readSchedstat(f)
+ if err != nil {
+ return 0, err
+ }
+
+ hz, err := j1.interpolate(j2)
+ if err != nil {
+ return 0, fmt.Errorf("interpolating hz value: %w", err)
+ }
+
+ return nearest(hz, hzValues)
+}
+
+// Jiffies returns the kernel's internal timestamp in jiffies read from
+// /proc/schedstat.
+func Jiffies() (uint64, error) {
+ f, err := os.Open("/proc/schedstat")
+ if err != nil {
+ return 0, err
+ }
+ defer f.Close()
+
+ k, err := readSchedstat(f)
+ if err != nil {
+ return 0, err
+ }
+
+ return k.k, nil
+}
+
+// readSchedstat expects to read /proc/schedstat and returns the first line
+// matching 'timestamp %d'. Upon return, f is rewound to allow reuse.
+//
+// Should not be called concurrently.
+func readSchedstat(f io.ReadSeeker) (ktime, error) {
+ // Rewind the file when done so the next call gets fresh data.
+ defer func() { _, _ = f.Seek(0, 0) }()
+
+ var j uint64
+ var t = time.Now()
+
+ s := bufio.NewScanner(f)
+ for s.Scan() {
+ if _, err := fmt.Sscanf(s.Text(), "timestamp %d", &j); err == nil {
+ return ktime{j, t}, nil
+ }
+ }
+
+ return ktime{}, errors.New("no kernel timestamp found")
+}
+
+type ktime struct {
+ k uint64
+ t time.Time
+}
+
+// interpolate returns the amount of jiffies (ktime) that would have elapsed if
+// both ktimes were measured exactly 1 second apart. Using linear interpolation,
+// the delta between both kernel timestamps is adjusted based on the elapsed
+// wall time between both measurements.
+func (old ktime) interpolate(new ktime) (uint16, error) {
+ if old.t.After(new.t) {
+ return 0, fmt.Errorf("old wall time %v is more recent than %v", old.t, new.t)
+ }
+ if old.k > new.k {
+ return 0, fmt.Errorf("old kernel timer %d is higher than %d", old.k, new.k)
+ }
+
+ // Jiffy and duration delta.
+ kd := new.k - old.k
+ td := new.t.Sub(old.t)
+
+ // Linear interpolation to represent elapsed jiffies as a per-second value.
+ hz := float64(kd) / td.Seconds()
+ hz = math.Round(hz)
+ if hz > math.MaxUint16 {
+ return 0, fmt.Errorf("interpolated hz value would overflow uint16: %f", hz)
+ }
+
+ return uint16(hz), nil
+}
+
+// nearest returns the entry from values that's closest to in. If in has an
+// equal distance to multiple values, the value that appears the earliest in
+// values wins. Returns error if values is empty.
+func nearest(in uint16, values []uint16) (uint16, error) {
+ if len(values) == 0 {
+ return 0, errors.New("values cannot be empty")
+ }
+
+ var out uint16
+ min := ^uint16(0)
+ for _, v := range values {
+ // Get absolute distance between in and v.
+ d := uint16(in - v)
+ if in < v {
+ d = v - in
+ }
+
+ // Check if the distance to the current number is smaller than to the
+ // previous number.
+ if d < min {
+ min = d
+ out = v
+ }
+ }
+
+ return out, nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/managed_neighbors.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/managed_neighbors.go
new file mode 100644
index 000000000..f260c1e2f
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/managed_neighbors.go
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package probes
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "runtime"
+ "sync"
+
+ "github.com/vishvananda/netlink"
+ "github.com/vishvananda/netns"
+)
+
+var (
+ managedNeighborOnce sync.Once
+ managedNeighborResult error
+)
+
+// HaveManagedNeighbors returns nil if the host supports managed neighbor entries (NTF_EXT_MANAGED).
+// On unexpected probe results this function will terminate with log.Fatal().
+func HaveManagedNeighbors() error {
+ managedNeighborOnce.Do(func() {
+ ch := make(chan struct{})
+
+ // In order to call haveManagedNeighbors safely, it has to be started
+ // in a goroutine, so we can make sure the goroutine ends when the function exits.
+ // This makes sure the underlying OS thread exits if we fail to restore it to the original netns.
+ go func() {
+ managedNeighborResult = haveManagedNeighbors()
+ close(ch)
+ }()
+ <-ch // wait for probe to finish
+
+ // if we encounter a different error than ErrNotSupported, terminate the agent.
+ if managedNeighborResult != nil && !errors.Is(managedNeighborResult, ErrNotSupported) {
+ log.WithError(managedNeighborResult).Fatal("failed to probe managed neighbor support")
+ }
+ })
+
+ return managedNeighborResult
+}
+
+func haveManagedNeighbors() (outer error) {
+ runtime.LockOSThread()
+ oldns, err := netns.Get()
+ if err != nil {
+ return fmt.Errorf("failed to get current netns: %w", err)
+ }
+ defer oldns.Close()
+
+ newns, err := netns.New()
+ if err != nil {
+ return fmt.Errorf("failed to create new netns: %w", err)
+ }
+ defer newns.Close()
+ defer func() {
+ // defer closes over named return variable err
+ if nerr := netns.Set(oldns); nerr != nil {
+ // The current goroutine is locked to an OS thread and we've failed
+ // to undo state modifications to the thread. Returning without unlocking
+ // the goroutine will make sure the underlying OS thread dies.
+ outer = fmt.Errorf("error setting thread back to its original netns: %w (original error: %s)", nerr, outer)
+ return
+ }
+ // only now that we have successfully changed the thread back to its
+ // original state (netns) we can safely unlock the goroutine from its OS thread.
+ runtime.UnlockOSThread()
+ }()
+
+ // Use a veth device instead of a dummy to avoid the kernel having to modprobe
+ // the dummy kmod, which could potentially be compiled out. veth is currently
+ // a hard dependency for Cilium, so safe to assume the module is available if
+ // not already loaded.
+ veth := &netlink.Veth{
+ LinkAttrs: netlink.LinkAttrs{Name: "veth0"},
+ PeerName: "veth1",
+ }
+
+ if err := netlink.LinkAdd(veth); err != nil {
+ return fmt.Errorf("failed to add dummy veth: %w", err)
+ }
+
+ neigh := netlink.Neigh{
+ LinkIndex: veth.Index,
+ IP: net.IPv4(0, 0, 0, 1),
+ Flags: NTF_EXT_LEARNED,
+ FlagsExt: NTF_EXT_MANAGED,
+ }
+
+ if err := netlink.NeighAdd(&neigh); err != nil {
+ return fmt.Errorf("failed to add neighbor: %w", err)
+ }
+
+ nl, err := netlink.NeighList(veth.Index, 0)
+ if err != nil {
+ return fmt.Errorf("failed to list neighbors: %w", err)
+ }
+
+ for _, n := range nl {
+ if !n.IP.Equal(neigh.IP) {
+ continue
+ }
+ if n.Flags != NTF_EXT_LEARNED {
+ continue
+ }
+ if n.FlagsExt != NTF_EXT_MANAGED {
+ continue
+ }
+
+ return nil
+ }
+
+ return ErrNotSupported
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes.go
new file mode 100644
index 000000000..1a8dbf0cc
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes.go
@@ -0,0 +1,634 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package probes
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "text/template"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/features"
+ "github.com/cilium/ebpf/rlimit"
+ "golang.org/x/sys/unix"
+
+ "github.com/cilium/cilium/pkg/command/exec"
+ "github.com/cilium/cilium/pkg/defaults"
+ "github.com/cilium/cilium/pkg/logging"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+)
+
+var (
+ log = logging.DefaultLogger.WithField(logfields.LogSubsys, "probes")
+ once sync.Once
+ probeManager *ProbeManager
+ tpl = template.New("headerfile")
+)
+
+func init() {
+ const content = `
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright Authors of Cilium */
+
+/* THIS FILE WAS GENERATED DURING AGENT STARTUP. */
+
+#pragma once
+
+{{- if not .Common}}
+#include "features.h"
+{{- end}}
+
+{{- range $key, $value := .Features}}
+{{- if $value}}
+#define {{$key}} 1
+{{end}}
+{{- end}}
+`
+ var err error
+ tpl, err = tpl.Parse(content)
+ if err != nil {
+ log.WithError(err).Fatal("could not parse headerfile template")
+ }
+}
+
+// ErrNotSupported indicates that a feature is not supported by the current kernel.
+var ErrNotSupported = errors.New("not supported")
+
+// KernelParam is a type based on string which represents CONFIG_* kernel
+// parameters which usually have values "y", "n" or "m".
+type KernelParam string
+
+// Enabled checks whether the kernel parameter is enabled.
+func (kp KernelParam) Enabled() bool {
+ return kp == "y"
+}
+
+// Module checks whether the kernel parameter is enabled as a module.
+func (kp KernelParam) Module() bool {
+ return kp == "m"
+}
+
+// kernelOption holds information about kernel parameters to probe.
+type kernelOption struct {
+ Description string
+ Enabled bool
+ CanBeModule bool
+}
+
+type ProgramHelper struct {
+ Program ebpf.ProgramType
+ Helper asm.BuiltinFunc
+}
+
+type miscFeatures struct {
+ HaveLargeInsnLimit bool
+ HaveFibIfindex bool
+}
+
+type FeatureProbes struct {
+ ProgramHelpers map[ProgramHelper]bool
+ Misc miscFeatures
+}
+
+// SystemConfig contains kernel configuration and sysctl parameters related to
+// BPF functionality.
+type SystemConfig struct {
+ UnprivilegedBpfDisabled int `json:"unprivileged_bpf_disabled"`
+ BpfJitEnable int `json:"bpf_jit_enable"`
+ BpfJitHarden int `json:"bpf_jit_harden"`
+ BpfJitKallsyms int `json:"bpf_jit_kallsyms"`
+ BpfJitLimit int `json:"bpf_jit_limit"`
+ ConfigBpf KernelParam `json:"CONFIG_BPF"`
+ ConfigBpfSyscall KernelParam `json:"CONFIG_BPF_SYSCALL"`
+ ConfigHaveEbpfJit KernelParam `json:"CONFIG_HAVE_EBPF_JIT"`
+ ConfigBpfJit KernelParam `json:"CONFIG_BPF_JIT"`
+ ConfigBpfJitAlwaysOn KernelParam `json:"CONFIG_BPF_JIT_ALWAYS_ON"`
+ ConfigCgroups KernelParam `json:"CONFIG_CGROUPS"`
+ ConfigCgroupBpf KernelParam `json:"CONFIG_CGROUP_BPF"`
+ ConfigCgroupNetClassID KernelParam `json:"CONFIG_CGROUP_NET_CLASSID"`
+ ConfigSockCgroupData KernelParam `json:"CONFIG_SOCK_CGROUP_DATA"`
+ ConfigBpfEvents KernelParam `json:"CONFIG_BPF_EVENTS"`
+ ConfigKprobeEvents KernelParam `json:"CONFIG_KPROBE_EVENTS"`
+ ConfigUprobeEvents KernelParam `json:"CONFIG_UPROBE_EVENTS"`
+ ConfigTracing KernelParam `json:"CONFIG_TRACING"`
+ ConfigFtraceSyscalls KernelParam `json:"CONFIG_FTRACE_SYSCALLS"`
+ ConfigFunctionErrorInjection KernelParam `json:"CONFIG_FUNCTION_ERROR_INJECTION"`
+ ConfigBpfKprobeOverride KernelParam `json:"CONFIG_BPF_KPROBE_OVERRIDE"`
+ ConfigNet KernelParam `json:"CONFIG_NET"`
+ ConfigXdpSockets KernelParam `json:"CONFIG_XDP_SOCKETS"`
+ ConfigLwtunnelBpf KernelParam `json:"CONFIG_LWTUNNEL_BPF"`
+ ConfigNetActBpf KernelParam `json:"CONFIG_NET_ACT_BPF"`
+ ConfigNetClsBpf KernelParam `json:"CONFIG_NET_CLS_BPF"`
+ ConfigNetClsAct KernelParam `json:"CONFIG_NET_CLS_ACT"`
+ ConfigNetSchIngress KernelParam `json:"CONFIG_NET_SCH_INGRESS"`
+ ConfigXfrm KernelParam `json:"CONFIG_XFRM"`
+ ConfigIPRouteClassID KernelParam `json:"CONFIG_IP_ROUTE_CLASSID"`
+ ConfigIPv6Seg6Bpf KernelParam `json:"CONFIG_IPV6_SEG6_BPF"`
+ ConfigBpfLircMode2 KernelParam `json:"CONFIG_BPF_LIRC_MODE2"`
+ ConfigBpfStreamParser KernelParam `json:"CONFIG_BPF_STREAM_PARSER"`
+ ConfigNetfilterXtMatchBpf KernelParam `json:"CONFIG_NETFILTER_XT_MATCH_BPF"`
+ ConfigBpfilter KernelParam `json:"CONFIG_BPFILTER"`
+ ConfigBpfilterUmh KernelParam `json:"CONFIG_BPFILTER_UMH"`
+ ConfigTestBpf KernelParam `json:"CONFIG_TEST_BPF"`
+ ConfigKernelHz KernelParam `json:"CONFIG_HZ"`
+}
+
+// MapTypes contains bools indicating which types of BPF maps the currently
+// running kernel supports.
+type MapTypes struct {
+ HaveHashMapType bool `json:"have_hash_map_type"`
+ HaveArrayMapType bool `json:"have_array_map_type"`
+ HaveProgArrayMapType bool `json:"have_prog_array_map_type"`
+ HavePerfEventArrayMapType bool `json:"have_perf_event_array_map_type"`
+ HavePercpuHashMapType bool `json:"have_percpu_hash_map_type"`
+ HavePercpuArrayMapType bool `json:"have_percpu_array_map_type"`
+ HaveStackTraceMapType bool `json:"have_stack_trace_map_type"`
+ HaveCgroupArrayMapType bool `json:"have_cgroup_array_map_type"`
+ HaveLruHashMapType bool `json:"have_lru_hash_map_type"`
+ HaveLruPercpuHashMapType bool `json:"have_lru_percpu_hash_map_type"`
+ HaveLpmTrieMapType bool `json:"have_lpm_trie_map_type"`
+ HaveArrayOfMapsMapType bool `json:"have_array_of_maps_map_type"`
+ HaveHashOfMapsMapType bool `json:"have_hash_of_maps_map_type"`
+ HaveDevmapMapType bool `json:"have_devmap_map_type"`
+ HaveSockmapMapType bool `json:"have_sockmap_map_type"`
+ HaveCpumapMapType bool `json:"have_cpumap_map_type"`
+ HaveXskmapMapType bool `json:"have_xskmap_map_type"`
+ HaveSockhashMapType bool `json:"have_sockhash_map_type"`
+ HaveCgroupStorageMapType bool `json:"have_cgroup_storage_map_type"`
+ HaveReuseportSockarrayMapType bool `json:"have_reuseport_sockarray_map_type"`
+ HavePercpuCgroupStorageMapType bool `json:"have_percpu_cgroup_storage_map_type"`
+ HaveQueueMapType bool `json:"have_queue_map_type"`
+ HaveStackMapType bool `json:"have_stack_map_type"`
+}
+
+// Features contains BPF feature checks returned by bpftool.
+type Features struct {
+ SystemConfig `json:"system_config"`
+ MapTypes `json:"map_types"`
+}
+
+// ProbeManager is a manager of BPF feature checks.
+type ProbeManager struct {
+ features Features
+}
+
+// NewProbeManager returns a new instance of ProbeManager - a manager of BPF
+// feature checks.
+func NewProbeManager() *ProbeManager {
+ newProbeManager := func() {
+ probeManager = &ProbeManager{}
+ probeManager.features = probeManager.Probe()
+ }
+ once.Do(newProbeManager)
+ return probeManager
+}
+
+// Probe probes the underlying kernel for features.
+func (*ProbeManager) Probe() Features {
+ var features Features
+ out, err := exec.WithTimeout(
+ defaults.ExecTimeout,
+ "bpftool", "-j", "feature", "probe",
+ ).CombinedOutput(log, true)
+ if err != nil {
+ log.WithError(err).Fatal("could not run bpftool")
+ }
+ if err := json.Unmarshal(out, &features); err != nil {
+ log.WithError(err).Fatal("could not parse bpftool output")
+ }
+ return features
+}
+
+// SystemConfigProbes performs a check of kernel configuration parameters. It
+// returns an error when parameters required by Cilium are not enabled. It logs
+// warnings when optional parameters are not enabled.
+//
+// When kernel config file is not found, bpftool can't probe kernel configuration
+// parameter real setting, so only return error log when kernel config file exists
+// and kernel configuration parameter setting is disabled
+func (p *ProbeManager) SystemConfigProbes() error {
+ var notFound bool
+ if !p.KernelConfigAvailable() {
+ notFound = true
+ log.Info("Kernel config file not found: if the agent fails to start, check the system requirements at https://docs.cilium.io/en/stable/operations/system_requirements")
+ }
+ requiredParams := p.GetRequiredConfig()
+ for param, kernelOption := range requiredParams {
+ if !kernelOption.Enabled && !notFound {
+ module := ""
+ if kernelOption.CanBeModule {
+ module = " or module"
+ }
+ return fmt.Errorf("%s kernel parameter%s is required (needed for: %s)", param, module, kernelOption.Description)
+ }
+ }
+ optionalParams := p.GetOptionalConfig()
+ for param, kernelOption := range optionalParams {
+ if !kernelOption.Enabled && !notFound {
+ module := ""
+ if kernelOption.CanBeModule {
+ module = " or module"
+ }
+ log.Warningf("%s optional kernel parameter%s is not in kernel (needed for: %s)", param, module, kernelOption.Description)
+ }
+ }
+ return nil
+}
+
+// GetRequiredConfig performs a check of mandatory kernel configuration options. It
+// returns a map indicating which required kernel parameters are enabled - and which are not.
+// GetRequiredConfig is being used by CLI "cilium kernel-check".
+func (p *ProbeManager) GetRequiredConfig() map[KernelParam]kernelOption {
+ config := p.features.SystemConfig
+ coreInfraDescription := "Essential eBPF infrastructure"
+ kernelParams := make(map[KernelParam]kernelOption)
+
+ kernelParams["CONFIG_BPF"] = kernelOption{
+ Enabled: config.ConfigBpf.Enabled(),
+ Description: coreInfraDescription,
+ CanBeModule: false,
+ }
+ kernelParams["CONFIG_BPF_SYSCALL"] = kernelOption{
+ Enabled: config.ConfigBpfSyscall.Enabled(),
+ Description: coreInfraDescription,
+ CanBeModule: false,
+ }
+ kernelParams["CONFIG_NET_SCH_INGRESS"] = kernelOption{
+ Enabled: config.ConfigNetSchIngress.Enabled() || config.ConfigNetSchIngress.Module(),
+ Description: coreInfraDescription,
+ CanBeModule: true,
+ }
+ kernelParams["CONFIG_NET_CLS_BPF"] = kernelOption{
+ Enabled: config.ConfigNetClsBpf.Enabled() || config.ConfigNetClsBpf.Module(),
+ Description: coreInfraDescription,
+ CanBeModule: true,
+ }
+ kernelParams["CONFIG_NET_CLS_ACT"] = kernelOption{
+ Enabled: config.ConfigNetClsAct.Enabled(),
+ Description: coreInfraDescription,
+ CanBeModule: false,
+ }
+ kernelParams["CONFIG_BPF_JIT"] = kernelOption{
+ Enabled: config.ConfigBpfJit.Enabled(),
+ Description: coreInfraDescription,
+ CanBeModule: false,
+ }
+ kernelParams["CONFIG_HAVE_EBPF_JIT"] = kernelOption{
+ Enabled: config.ConfigHaveEbpfJit.Enabled(),
+ Description: coreInfraDescription,
+ CanBeModule: false,
+ }
+
+ return kernelParams
+}
+
+// GetOptionalConfig performs a check of *optional* kernel configuration options. It
+// returns a map indicating which optional/non-mandatory kernel parameters are enabled.
+// GetOptionalConfig is being used by CLI "cilium kernel-check".
+func (p *ProbeManager) GetOptionalConfig() map[KernelParam]kernelOption {
+ config := p.features.SystemConfig
+ kernelParams := make(map[KernelParam]kernelOption)
+
+ kernelParams["CONFIG_CGROUP_BPF"] = kernelOption{
+ Enabled: config.ConfigCgroupBpf.Enabled(),
+ Description: "Host Reachable Services and Sockmap optimization",
+ CanBeModule: false,
+ }
+ kernelParams["CONFIG_LWTUNNEL_BPF"] = kernelOption{
+ Enabled: config.ConfigLwtunnelBpf.Enabled(),
+ Description: "Lightweight Tunnel hook for IP-in-IP encapsulation",
+ CanBeModule: false,
+ }
+ kernelParams["CONFIG_BPF_EVENTS"] = kernelOption{
+ Enabled: config.ConfigBpfEvents.Enabled(),
+ Description: "Visibility and congestion management with datapath",
+ CanBeModule: false,
+ }
+
+ return kernelParams
+}
+
+// KernelConfigAvailable checks if the Kernel Config is available on the
+// system or not.
+func (p *ProbeManager) KernelConfigAvailable() bool {
+ // Check Kernel Config is available or not.
+ // We are replicating BPFTools logic here to check if kernel config is available
+ // https://elixir.bootlin.com/linux/v5.7/source/tools/bpf/bpftool/feature.c#L390
+ info := unix.Utsname{}
+ err := unix.Uname(&info)
+ if err != nil {
+ return false
+ }
+ release := strings.TrimSpace(string(bytes.Trim(info.Release[:], "\x00")))
+
+ // Any error checking these files will return Kernel config not found error
+ if _, err := os.Stat(fmt.Sprintf("/boot/config-%s", release)); err != nil {
+ if _, err = os.Stat("/proc/config.gz"); err != nil {
+ return false
+ }
+ }
+
+ return true
+}
+
+// HaveProgramHelper is a wrapper around features.HaveProgramHelper() to
+// check if a certain BPF program/helper combination is supported by the kernel.
+// On unexpected probe results this function will terminate with log.Fatal().
+func HaveProgramHelper(pt ebpf.ProgramType, helper asm.BuiltinFunc) error {
+ err := features.HaveProgramHelper(pt, helper)
+ if errors.Is(err, ebpf.ErrNotSupported) {
+ return err
+ }
+ if err != nil {
+ log.WithError(err).WithField("programtype", pt).WithField("helper", helper).Fatal("failed to probe helper")
+ }
+ return nil
+}
+
+// HaveLargeInstructionLimit is a wrapper around features.HaveLargeInstructions()
+// to check if the kernel supports the 1 Million instruction limit.
+// On unexpected probe results this function will terminate with log.Fatal().
+func HaveLargeInstructionLimit() error {
+ err := features.HaveLargeInstructions()
+ if errors.Is(err, ebpf.ErrNotSupported) {
+ return err
+ }
+ if err != nil {
+ log.WithError(err).Fatal("failed to probe large instruction limit")
+ }
+ return nil
+}
+
+// HaveBoundedLoops is a wrapper around features.HaveBoundedLoops()
+// to check if the kernel supports bounded loops in BPF programs.
+// On unexpected probe results this function will terminate with log.Fatal().
+func HaveBoundedLoops() error {
+ err := features.HaveBoundedLoops()
+ if errors.Is(err, ebpf.ErrNotSupported) {
+ return err
+ }
+ if err != nil {
+ log.WithError(err).Fatal("failed to probe bounded loops")
+ }
+ return nil
+}
+
+// HaveFibIfindex checks if kernel has d1c362e1dd68 ("bpf: Always return target
+// ifindex in bpf_fib_lookup") which is 5.10+. This got merged in the same kernel
+// as the new redirect helpers.
+func HaveFibIfindex() error {
+ return features.HaveProgramHelper(ebpf.SchedCLS, asm.FnRedirectPeer)
+}
+
+// HaveV2ISA is a wrapper around features.HaveV2ISA() to check if the kernel
+// supports the V2 ISA.
+// On unexpected probe results this function will terminate with log.Fatal().
+func HaveV2ISA() error {
+ err := features.HaveV2ISA()
+ if errors.Is(err, ebpf.ErrNotSupported) {
+ return err
+ }
+ if err != nil {
+ log.WithError(err).Fatal("failed to probe V2 ISA")
+ }
+ return nil
+}
+
+// HaveV3ISA is a wrapper around features.HaveV3ISA() to check if the kernel
+// supports the V3 ISA.
+// On unexpected probe results this function will terminate with log.Fatal().
+func HaveV3ISA() error {
+ err := features.HaveV3ISA()
+ if errors.Is(err, ebpf.ErrNotSupported) {
+ return err
+ }
+ if err != nil {
+ log.WithError(err).Fatal("failed to probe V3 ISA")
+ }
+ return nil
+}
+
+// HaveOuterSourceIPSupport tests whether the kernel support setting the outer
+// source IP address via the bpf_skb_set_tunnel_key BPF helper. We can't rely
+// on the verifier to reject a program using the new support because the
+// verifier just accepts any argument size for that helper; non-supported
+// fields will simply not be used. Instead, we set the outer source IP and
+// retrieve it with bpf_skb_get_tunnel_key right after. If the retrieved value
+// equals the value set, we have a confirmation the kernel supports it.
+func HaveOuterSourceIPSupport() (err error) {
+ defer func() {
+ if err != nil && !errors.Is(err, ebpf.ErrNotSupported) {
+ log.WithError(err).Fatal("failed to probe for outer source IP support")
+ }
+ }()
+
+ if err := rlimit.RemoveMemlock(); err != nil {
+ return err
+ }
+
+ progSpec := &ebpf.ProgramSpec{
+ Name: "set_tunnel_key_probe",
+ Type: ebpf.SchedACT,
+ License: "GPL",
+ }
+ progSpec.Instructions = asm.Instructions{
+ asm.Mov.Reg(asm.R8, asm.R1),
+
+ asm.Mov.Imm(asm.R2, 0),
+ asm.StoreMem(asm.RFP, -8, asm.R2, asm.DWord),
+ asm.StoreMem(asm.RFP, -16, asm.R2, asm.DWord),
+ asm.StoreMem(asm.RFP, -24, asm.R2, asm.DWord),
+ asm.StoreMem(asm.RFP, -32, asm.R2, asm.DWord),
+ asm.StoreMem(asm.RFP, -40, asm.R2, asm.DWord),
+ asm.Mov.Imm(asm.R2, 42),
+ asm.StoreMem(asm.RFP, -44, asm.R2, asm.Word),
+ asm.Mov.Reg(asm.R2, asm.RFP),
+ asm.Add.Imm(asm.R2, -44),
+ asm.Mov.Imm(asm.R3, 44), // sizeof(struct bpf_tunnel_key) when setting the outer source IP is supported.
+ asm.Mov.Imm(asm.R4, 0),
+ asm.FnSkbSetTunnelKey.Call(),
+
+ asm.Mov.Reg(asm.R1, asm.R8),
+ asm.Mov.Reg(asm.R2, asm.RFP),
+ asm.Add.Imm(asm.R2, -44),
+ asm.Mov.Imm(asm.R3, 44),
+ asm.Mov.Imm(asm.R4, 0),
+ asm.FnSkbGetTunnelKey.Call(),
+
+ asm.LoadMem(asm.R0, asm.RFP, -44, asm.Word),
+ asm.Return(),
+ }
+ prog, err := ebpf.NewProgram(progSpec)
+ if err != nil {
+ return err
+ }
+ defer prog.Close()
+
+ pkt := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ ret, _, err := prog.Test(pkt)
+ if err != nil {
+ return err
+ }
+ if ret != 42 {
+ return ebpf.ErrNotSupported
+ }
+ return nil
+}
+
+// HaveIPv6Support tests whether kernel can open an IPv6 socket. This will
+// also implicitly auto-load IPv6 kernel module if available and not yet
+// loaded.
+func HaveIPv6Support() error {
+ fd, err := unix.Socket(unix.AF_INET6, unix.SOCK_STREAM, 0)
+ if errors.Is(err, unix.EAFNOSUPPORT) || errors.Is(err, unix.EPROTONOSUPPORT) {
+ return ErrNotSupported
+ }
+ unix.Close(fd)
+ return nil
+}
+
+// CreateHeaderFiles creates C header files with macros indicating which BPF
+// features are available in the kernel.
+func CreateHeaderFiles(headerDir string, probes *FeatureProbes) error {
+ common, err := os.Create(filepath.Join(headerDir, "features.h"))
+ if err != nil {
+ return fmt.Errorf("could not create common features header file: %w", err)
+ }
+ defer common.Close()
+ if err := writeCommonHeader(common, probes); err != nil {
+ return fmt.Errorf("could not write common features header file: %w", err)
+ }
+
+ skb, err := os.Create(filepath.Join(headerDir, "features_skb.h"))
+ if err != nil {
+ return fmt.Errorf("could not create skb related features header file: %w", err)
+ }
+ defer skb.Close()
+ if err := writeSkbHeader(skb, probes); err != nil {
+ return fmt.Errorf("could not write skb related features header file: %w", err)
+ }
+
+ xdp, err := os.Create(filepath.Join(headerDir, "features_xdp.h"))
+ if err != nil {
+ return fmt.Errorf("could not create xdp related features header file: %w", err)
+ }
+ defer xdp.Close()
+ if err := writeXdpHeader(xdp, probes); err != nil {
+ return fmt.Errorf("could not write xdp related features header file: %w", err)
+ }
+
+ return nil
+}
+
+// ExecuteHeaderProbes probes the kernel for a specific set of BPF features
+// which are currently used to generate various feature macros for the datapath.
+// The probe results returned in FeatureProbes are then used in the respective
+// function that writes the actual C macro definitions.
+// Further needed probes should be added here, while new macro strings need to
+// be added in the correct `write*Header()` function.
+func ExecuteHeaderProbes() *FeatureProbes {
+ probes := FeatureProbes{
+ ProgramHelpers: make(map[ProgramHelper]bool),
+ Misc: miscFeatures{},
+ }
+
+ progHelpers := []ProgramHelper{
+ // common probes
+ {ebpf.CGroupSock, asm.FnGetNetnsCookie},
+ {ebpf.CGroupSockAddr, asm.FnGetNetnsCookie},
+ {ebpf.CGroupSockAddr, asm.FnGetSocketCookie},
+ {ebpf.CGroupSock, asm.FnJiffies64},
+ {ebpf.CGroupSockAddr, asm.FnJiffies64},
+ {ebpf.SchedCLS, asm.FnJiffies64},
+ {ebpf.XDP, asm.FnJiffies64},
+ {ebpf.CGroupSockAddr, asm.FnSkLookupTcp},
+ {ebpf.CGroupSockAddr, asm.FnSkLookupUdp},
+ {ebpf.CGroupSockAddr, asm.FnGetCurrentCgroupId},
+ {ebpf.CGroupSock, asm.FnSetRetval},
+ {ebpf.SchedCLS, asm.FnRedirectNeigh},
+ {ebpf.SchedCLS, asm.FnRedirectPeer},
+
+ // skb related probes
+ {ebpf.SchedCLS, asm.FnSkbChangeTail},
+ {ebpf.SchedCLS, asm.FnFibLookup},
+ {ebpf.SchedCLS, asm.FnCsumLevel},
+
+ // xdp related probes
+ {ebpf.XDP, asm.FnFibLookup},
+ }
+ for _, ph := range progHelpers {
+ probes.ProgramHelpers[ph] = (HaveProgramHelper(ph.Program, ph.Helper) == nil)
+ }
+
+ probes.Misc.HaveLargeInsnLimit = (HaveLargeInstructionLimit() == nil)
+ probes.Misc.HaveFibIfindex = (HaveFibIfindex() == nil)
+
+ return &probes
+}
+
+// writeCommonHeader defines macros for bpf/include/bpf/features.h
+func writeCommonHeader(writer io.Writer, probes *FeatureProbes) error {
+ features := map[string]bool{
+ "HAVE_NETNS_COOKIE": probes.ProgramHelpers[ProgramHelper{ebpf.CGroupSock, asm.FnGetNetnsCookie}] &&
+ probes.ProgramHelpers[ProgramHelper{ebpf.CGroupSockAddr, asm.FnGetNetnsCookie}],
+ "HAVE_SOCKET_COOKIE": probes.ProgramHelpers[ProgramHelper{ebpf.CGroupSockAddr, asm.FnGetSocketCookie}],
+ "HAVE_JIFFIES": probes.ProgramHelpers[ProgramHelper{ebpf.CGroupSock, asm.FnJiffies64}] &&
+ probes.ProgramHelpers[ProgramHelper{ebpf.CGroupSockAddr, asm.FnJiffies64}] &&
+ probes.ProgramHelpers[ProgramHelper{ebpf.SchedCLS, asm.FnJiffies64}] &&
+ probes.ProgramHelpers[ProgramHelper{ebpf.XDP, asm.FnJiffies64}],
+ "HAVE_SOCKET_LOOKUP": probes.ProgramHelpers[ProgramHelper{ebpf.CGroupSockAddr, asm.FnSkLookupTcp}] &&
+ probes.ProgramHelpers[ProgramHelper{ebpf.CGroupSockAddr, asm.FnSkLookupUdp}],
+ "HAVE_CGROUP_ID": probes.ProgramHelpers[ProgramHelper{ebpf.CGroupSockAddr, asm.FnGetCurrentCgroupId}],
+ "HAVE_LARGE_INSN_LIMIT": probes.Misc.HaveLargeInsnLimit,
+ "HAVE_SET_RETVAL": probes.ProgramHelpers[ProgramHelper{ebpf.CGroupSock, asm.FnSetRetval}],
+ "HAVE_FIB_NEIGH": probes.ProgramHelpers[ProgramHelper{ebpf.SchedCLS, asm.FnRedirectNeigh}],
+ "HAVE_FIB_IFINDEX": probes.Misc.HaveFibIfindex,
+ }
+
+ return writeFeatureHeader(writer, features, true)
+}
+
+// writeSkbHeader defines macros for bpf/include/bpf/features_skb.h
+func writeSkbHeader(writer io.Writer, probes *FeatureProbes) error {
+ featuresSkb := map[string]bool{
+ "HAVE_CHANGE_TAIL": probes.ProgramHelpers[ProgramHelper{ebpf.SchedCLS, asm.FnSkbChangeTail}],
+ "HAVE_FIB_LOOKUP": probes.ProgramHelpers[ProgramHelper{ebpf.SchedCLS, asm.FnFibLookup}],
+ "HAVE_CSUM_LEVEL": probes.ProgramHelpers[ProgramHelper{ebpf.SchedCLS, asm.FnCsumLevel}],
+ }
+
+ return writeFeatureHeader(writer, featuresSkb, false)
+}
+
+// writeXdpHeader defines macros for bpf/include/bpf/features_xdp.h
+func writeXdpHeader(writer io.Writer, probes *FeatureProbes) error {
+ featuresXdp := map[string]bool{
+ "HAVE_FIB_LOOKUP": probes.ProgramHelpers[ProgramHelper{ebpf.XDP, asm.FnFibLookup}],
+ }
+
+ return writeFeatureHeader(writer, featuresXdp, false)
+}
+
+func writeFeatureHeader(writer io.Writer, features map[string]bool, common bool) error {
+ input := struct {
+ Common bool
+ Features map[string]bool
+ }{
+ Common: common,
+ Features: features,
+ }
+
+ if err := tpl.Execute(writer, input); err != nil {
+ return fmt.Errorf("could not write template: %w", err)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes_linux.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes_linux.go
new file mode 100644
index 000000000..846e9c28e
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes_linux.go
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package probes
+
+import "github.com/vishvananda/netlink"
+
+// Family type definitions
+const (
+ NTF_EXT_LEARNED = netlink.NTF_EXT_LEARNED
+ NTF_EXT_MANAGED = netlink.NTF_EXT_MANAGED
+)
diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes_unspecified.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes_unspecified.go
new file mode 100644
index 000000000..f92efd499
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes_unspecified.go
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+//go:build !linux
+
+package probes
+
+// Dummy values on non-linux platform
+const (
+ NTF_EXT_LEARNED = iota
+ NTF_EXT_MANAGED
+)
diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/loader/metrics/metrics.go b/vendor/github.com/cilium/cilium/pkg/datapath/loader/metrics/metrics.go
new file mode 100644
index 000000000..4896dc830
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/datapath/loader/metrics/metrics.go
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package metrics
+
+import (
+ "github.com/cilium/cilium/pkg/spanstat"
+)
+
+// SpanStat is a statistics structure for storing metrics related to datapath
+// load operations.
+type SpanStat struct {
+ BpfCompilation spanstat.SpanStat
+ BpfWaitForELF spanstat.SpanStat
+ BpfWriteELF spanstat.SpanStat
+ BpfLoadProg spanstat.SpanStat
+}
+
+// GetMap returns a map of statistic names to stats
+func (s *SpanStat) GetMap() map[string]*spanstat.SpanStat {
+ return map[string]*spanstat.SpanStat{
+ "bpfCompilation": &s.BpfCompilation,
+ "bpfWaitForELF": &s.BpfWaitForELF,
+ "bpfWriteELF": &s.BpfWriteELF,
+ "bpfLoadProg": &s.BpfLoadProg,
+ }
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/types/config.go b/vendor/github.com/cilium/cilium/pkg/datapath/types/config.go
new file mode 100644
index 000000000..2fa3c9874
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/datapath/types/config.go
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package types
+
+import (
+ "io"
+ "net/netip"
+
+ "github.com/cilium/cilium/pkg/cidr"
+ "github.com/cilium/cilium/pkg/identity"
+ "github.com/cilium/cilium/pkg/mac"
+ "github.com/cilium/cilium/pkg/node"
+ "github.com/cilium/cilium/pkg/option"
+)
+
+// DeviceConfiguration is an interface for injecting configuration of datapath
+// options that affect lookups and logic applied at a per-device level, whether
+// those are devices associated with the endpoint or associated with the host.
+type DeviceConfiguration interface {
+ // GetCIDRPrefixLengths fetches the lists of unique IPv6 and IPv4
+ // prefix lengths used for datapath lookups, each of which is sorted
+ // from longest prefix to shortest prefix. It must return more than
+ // one element in each returned array.
+ GetCIDRPrefixLengths() (s6, s4 []int)
+
+ // GetOptions fetches the configurable datapath options from the owner.
+ GetOptions() *option.IntOptions
+}
+
+// LoadTimeConfiguration provides datapath implementations a clean interface
+// to access endpoint-specific configuration that can be changed at load time.
+type LoadTimeConfiguration interface {
+ // GetID returns a locally-significant endpoint identification number.
+ GetID() uint64
+ // StringID returns the string-formatted version of the ID from GetID().
+ StringID() string
+ // GetIdentity returns a globally-significant numeric security identity.
+ GetIdentity() identity.NumericIdentity
+
+ // GetIdentityLocked returns a globally-significant numeric security
+ // identity while assuming that the backing data structure is locked.
+ // This function should be removed in favour of GetIdentity()
+ GetIdentityLocked() identity.NumericIdentity
+
+ IPv4Address() netip.Addr
+ IPv6Address() netip.Addr
+ GetNodeMAC() mac.MAC
+}
+
+// CompileTimeConfiguration provides datapath implementations a clean interface
+// to access endpoint-specific configuration that can only be changed at
+// compile time.
+type CompileTimeConfiguration interface {
+ DeviceConfiguration
+
+ // TODO: Move this detail into the datapath
+ ConntrackLocalLocked() bool
+
+ // RequireARPPassthrough returns true if the datapath must implement
+ // ARP passthrough for this endpoint
+ RequireARPPassthrough() bool
+
+ // RequireEgressProg returns true if the endpoint requires an egress
+ // program attached to the InterfaceName() invoking the section
+ // "to-container"
+ RequireEgressProg() bool
+
+ // RequireRouting returns true if the endpoint requires BPF routing to
+ // be enabled, when disabled, routing is delegated to Linux routing
+ RequireRouting() bool
+
+ // RequireEndpointRoute returns true if the endpoint wishes to have a
+ // per endpoint route installed in the host's routing table to point to
+ // the endpoint's interface
+ RequireEndpointRoute() bool
+
+ // GetPolicyVerdictLogFilter returns the PolicyVerdictLogFilter for the endpoint
+ GetPolicyVerdictLogFilter() uint32
+
+ // IsHost returns true if the endpoint is the host endpoint.
+ IsHost() bool
+}
+
+// EndpointConfiguration provides datapath implementations a clean interface
+// to access endpoint-specific configuration when configuring the datapath.
+type EndpointConfiguration interface {
+ CompileTimeConfiguration
+ LoadTimeConfiguration
+}
+
+// ConfigWriter is anything which writes the configuration for various datapath
+// program types.
+type ConfigWriter interface {
+ // WriteNodeConfig writes the implementation-specific configuration of
+ // node-wide options into the specified writer.
+ WriteNodeConfig(io.Writer, *LocalNodeConfiguration) error
+
+ // WriteNetdevConfig writes the implementation-specific configuration
+ // of configurable options to the specified writer. Options specified
+ // here will apply to base programs and not to endpoints, though
+ // endpoints may have equivalent configurable options.
+ WriteNetdevConfig(io.Writer, DeviceConfiguration) error
+
+ // WriteTemplateConfig writes the implementation-specific configuration
+ // of configurable options for BPF templates to the specified writer.
+ WriteTemplateConfig(w io.Writer, cfg EndpointConfiguration) error
+
+ // WriteEndpointConfig writes the implementation-specific configuration
+ // of configurable options for the endpoint to the specified writer.
+ WriteEndpointConfig(w io.Writer, cfg EndpointConfiguration) error
+}
+
+// RemoteSNATDstAddrExclusionCIDRv4 returns a CIDR for SNAT exclusion. Any
+// packet sent from a local endpoint to an IP address belonging to the CIDR
+// should not be SNAT'd.
+func RemoteSNATDstAddrExclusionCIDRv4() *cidr.CIDR {
+ if c := option.Config.GetIPv4NativeRoutingCIDR(); c != nil {
+ // ipv4-native-routing-cidr is set, so use it
+ return c
+ }
+
+ return node.GetIPv4AllocRange()
+}
+
+// RemoteSNATDstAddrExclusionCIDRv6 returns a IPv6 CIDR for SNAT exclusion. Any
+// packet sent from a local endpoint to an IP address belonging to the CIDR
+// should not be SNAT'd.
+func RemoteSNATDstAddrExclusionCIDRv6() *cidr.CIDR {
+ if c := option.Config.GetIPv6NativeRoutingCIDR(); c != nil {
+ // ipv6-native-routing-cidr is set, so use it
+ return c
+ }
+
+ return node.GetIPv6AllocRange()
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/types/datapath.go b/vendor/github.com/cilium/cilium/pkg/datapath/types/datapath.go
new file mode 100644
index 000000000..40c34b6b6
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/datapath/types/datapath.go
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package types
+
+// Datapath is the interface to abstract all datapath interactions. The
+// abstraction allows to implement the datapath requirements with multiple
+// implementations
+type Datapath interface {
+ ConfigWriter
+ IptablesManager
+
+ // Node must return the handler for node events
+ Node() NodeHandler
+
+ NodeIDs() NodeIDHandler
+
+ NodeNeighbors() NodeNeighbors
+
+ // LocalNodeAddressing must return the node addressing implementation
+ // of the local node
+ LocalNodeAddressing() NodeAddressing
+
+ // Loader must return the implementation of the loader, which is responsible
+ // for loading, reloading, and compiling datapath programs.
+ Loader() Loader
+
+ // WireguardAgent returns the WireGuard agent for the local node
+ WireguardAgent() WireguardAgent
+
+ // LBMap returns the load-balancer map
+ LBMap() LBMap
+
+ Procfs() string
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/types/endpoint.go b/vendor/github.com/cilium/cilium/pkg/datapath/types/endpoint.go
new file mode 100644
index 000000000..df3bc01aa
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/datapath/types/endpoint.go
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package types
+
+import "github.com/sirupsen/logrus"
+
+// Endpoint provides access to endpoint configuration information that is necessary
+// to compile and load the datapath.
+type Endpoint interface {
+ EndpointConfiguration
+ InterfaceName() string
+ Logger(subsystem string) *logrus.Entry
+ StateDir() string
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/types/lbmap.go b/vendor/github.com/cilium/cilium/pkg/datapath/types/lbmap.go
new file mode 100644
index 000000000..44ba2bbe6
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/datapath/types/lbmap.go
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package types
+
+import (
+ "net"
+ "sort"
+
+ "github.com/cilium/cilium/pkg/cidr"
+ "github.com/cilium/cilium/pkg/loadbalancer"
+)
+
+// LBMap is the interface describing methods for manipulating service maps.
+type LBMap interface {
+ UpsertService(*UpsertServiceParams) error
+ UpsertMaglevLookupTable(uint16, map[string]*loadbalancer.Backend, bool) error
+ IsMaglevLookupTableRecreated(bool) bool
+ DeleteService(loadbalancer.L3n4AddrID, int, bool, loadbalancer.SVCNatPolicy) error
+ AddBackend(*loadbalancer.Backend, bool) error
+ UpdateBackendWithState(*loadbalancer.Backend) error
+ DeleteBackendByID(loadbalancer.BackendID) error
+ AddAffinityMatch(uint16, loadbalancer.BackendID) error
+ DeleteAffinityMatch(uint16, loadbalancer.BackendID) error
+ UpdateSourceRanges(uint16, []*cidr.CIDR, []*cidr.CIDR, bool) error
+ DumpServiceMaps() ([]*loadbalancer.SVC, []error)
+ DumpBackendMaps() ([]*loadbalancer.Backend, error)
+ DumpAffinityMatches() (BackendIDByServiceIDSet, error)
+ DumpSourceRanges(bool) (SourceRangeSetByServiceID, error)
+}
+
+type UpsertServiceParams struct {
+ ID uint16
+ IP net.IP
+ Port uint16
+
+ // PreferredBackends is a subset of ActiveBackends
+ // Note: this is only used in clustermesh with service affinity annotation.
+ PreferredBackends map[string]*loadbalancer.Backend
+ ActiveBackends map[string]*loadbalancer.Backend
+ NonActiveBackends []loadbalancer.BackendID
+ PrevBackendsCount int
+ IPv6 bool
+ Type loadbalancer.SVCType
+ NatPolicy loadbalancer.SVCNatPolicy
+ ExtLocal bool
+ IntLocal bool
+ Scope uint8
+ SessionAffinity bool
+ SessionAffinityTimeoutSec uint32
+ CheckSourceRange bool
+ UseMaglev bool
+ L7LBProxyPort uint16 // Non-zero for L7 LB services
+ Name loadbalancer.ServiceName // Fully qualified name of the service
+ LoopbackHostport bool
+}
+
+// GetOrderedBackends returns an ordered list of backends with all the sorted
+// preferred backend followed by active and non-active backends.
+// Encapsulates logic to be also used in unit tests.
+func (p *UpsertServiceParams) GetOrderedBackends() []loadbalancer.BackendID {
+ backendIDs := make([]loadbalancer.BackendID, 0, len(p.ActiveBackends)+len(p.NonActiveBackends))
+ for _, b := range p.ActiveBackends {
+ backendIDs = append(backendIDs, b.ID)
+ }
+
+ preferredMap := map[loadbalancer.BackendID]struct{}{}
+ for _, b := range p.PreferredBackends {
+ preferredMap[b.ID] = struct{}{}
+ }
+
+ // Map iterations are non-deterministic so sort the backends by their IDs
+ // in order to maintain the same order before they are populated in BPF maps.
+ // This will minimize disruption to existing connections to the backends in the datapath.
+ sort.Slice(backendIDs, func(i, j int) bool {
+ // compare preferred flags of two backend IDs
+ _, firstPreferred := preferredMap[backendIDs[i]]
+ _, secondPreferred := preferredMap[backendIDs[j]]
+
+ if firstPreferred && secondPreferred {
+ return backendIDs[i] < backendIDs[j]
+ }
+
+ if firstPreferred {
+ return true
+ }
+
+ if secondPreferred {
+ return false
+ }
+
+ return backendIDs[i] < backendIDs[j]
+ })
+
+ // Add the non-active backends to the end of preferred/active backends list so that they are
+ // not considered while selecting backends to load-balance service traffic.
+ if len(p.NonActiveBackends) > 0 {
+ backendIDs = append(backendIDs, p.NonActiveBackends...)
+ }
+
+ return backendIDs
+}
+
+// BackendIDByServiceIDSet is the type of a set for checking whether a backend
+// belongs to a given service
+type BackendIDByServiceIDSet map[uint16]map[loadbalancer.BackendID]struct{} // svc ID => backend ID
+
+type SourceRangeSetByServiceID map[uint16][]*cidr.CIDR // svc ID => src range CIDRs
diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/types/loader.go b/vendor/github.com/cilium/cilium/pkg/datapath/types/loader.go
new file mode 100644
index 000000000..aabd5e3ee
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/datapath/types/loader.go
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package types
+
+import (
+ "context"
+ "io"
+ "net"
+
+ "github.com/cilium/cilium/pkg/datapath/loader/metrics"
+ "github.com/cilium/cilium/pkg/lock"
+)
+
+// Loader is an interface to abstract out loading of datapath programs.
+type Loader interface {
+ CallsMapPath(id uint16) string
+ CustomCallsMapPath(id uint16) string
+ CompileAndLoad(ctx context.Context, ep Endpoint, stats *metrics.SpanStat) error
+ CompileOrLoad(ctx context.Context, ep Endpoint, stats *metrics.SpanStat) error
+ ReloadDatapath(ctx context.Context, ep Endpoint, stats *metrics.SpanStat) error
+ EndpointHash(cfg EndpointConfiguration) (string, error)
+ Unload(ep Endpoint)
+ Reinitialize(ctx context.Context, o BaseProgramOwner, deviceMTU int, iptMgr IptablesManager, p Proxy) error
+ HostDatapathInitialized() <-chan struct{}
+}
+
+// BaseProgramOwner is any type for which a loader is building base programs.
+type BaseProgramOwner interface {
+ DeviceConfiguration
+ GetCompilationLock() *lock.RWMutex
+ Datapath() Datapath
+ LocalConfig() *LocalNodeConfiguration
+ SetPrefilter(pf PreFilter)
+}
+
+// PreFilter an interface for an XDP pre-filter.
+type PreFilter interface {
+ WriteConfig(fw io.Writer)
+ Dump(to []string) ([]string, int64)
+ Insert(revision int64, cidrs []net.IPNet) error
+ Delete(revision int64, cidrs []net.IPNet) error
+}
+
+// Proxy is any type which installs rules related to redirecting traffic to
+// a proxy.
+type Proxy interface {
+ ReinstallRoutingRules() error
+ ReinstallIPTablesRules(ctx context.Context) error
+}
+
+// IptablesManager manages iptables rules.
+type IptablesManager interface {
+ // InstallProxyRules creates the necessary datapath config (e.g., iptables
+ // rules for redirecting host proxy traffic on a specific ProxyPort)
+ InstallProxyRules(ctx context.Context, proxyPort uint16, ingress, localOnly bool, name string) error
+
+ // SupportsOriginalSourceAddr tells if the datapath supports
+ // use of original source addresses in proxy upstream
+ // connections.
+ SupportsOriginalSourceAddr() bool
+ InstallRules(ctx context.Context, ifName string, quiet, install bool) error
+
+ // GetProxyPort fetches the existing proxy port configured for the
+ // specified listener. Used early in bootstrap to reopen proxy ports.
+ GetProxyPort(listener string) uint16
+
+ // InstallNoTrackRules is explicitly called when a pod has valid
+ // "policy.cilium.io/no-track-port" annotation. When
+ // InstallNoConntrackIptRules flag is set, a super set of v4 NOTRACK
+ // rules will be automatically installed upon agent bootstrap (via
+ // function addNoTrackPodTrafficRules) and this function will be
+ // skipped. When InstallNoConntrackIptRules is not set, this function
+ // will be executed to install NOTRACK rules. The rules installed by
+ // this function is very specific, for now, the only user is
+ // node-local-dns pods.
+ InstallNoTrackRules(IP string, port uint16, ipv6 bool) error
+
+ // See comments for InstallNoTrackRules.
+ RemoveNoTrackRules(IP string, port uint16, ipv6 bool) error
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/types/node.go b/vendor/github.com/cilium/cilium/pkg/datapath/types/node.go
new file mode 100644
index 000000000..4320d927e
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/datapath/types/node.go
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package types
+
+import (
+ "context"
+ "net"
+
+ "github.com/cilium/cilium/api/v1/models"
+ "github.com/cilium/cilium/pkg/cidr"
+ "github.com/cilium/cilium/pkg/mtu"
+ nodeTypes "github.com/cilium/cilium/pkg/node/types"
+)
+
+// LocalNodeConfiguration represents the configuration of the local node
+type LocalNodeConfiguration struct {
+ // MtuConfig is the MTU configuration of the node.
+ //
+ // This field is immutable at runtime. The value will not change in
+ // subsequent calls to NodeConfigurationChanged().
+ MtuConfig mtu.Configuration
+
+ // AuxiliaryPrefixes is the list of auxiliary prefixes that should be
+ // configured in addition to the node PodCIDR
+ //
+ // This field is mutable. The implementation of
+ // NodeConfigurationChanged() must adjust the routes accordingly.
+ AuxiliaryPrefixes []*cidr.CIDR
+
+ // EnableIPv4 enables use of IPv4. Routing to the IPv4 allocation CIDR
+ // of other nodes must be enabled.
+ //
+ // This field is immutable at runtime. The value will not change in
+ // subsequent calls to NodeConfigurationChanged().
+ EnableIPv4 bool
+
+ // EnableIPv6 enables use of IPv6. Routing to the IPv6 allocation CIDR
+ // of other nodes must be enabled.
+ //
+ // This field is immutable at runtime. The value will not change in
+ // subsequent calls to NodeConfigurationChanged().
+ EnableIPv6 bool
+
+ // UseSingleClusterRoute enables the use of a single cluster-wide route
+ // to direct traffic from the host into the Cilium datapath. This
+ // avoids the requirement to install a separate route for each node
+ // CIDR and can thus improve the overhead when operating large clusters
+ // with significant node event churn due to auto-scaling.
+ //
+ // Use of UseSingleClusterRoute must be compatible with
+ // EnableAutoDirectRouting. When both are enabled, any direct node
+ // route must take precedence over the cluster-wide route as per LPM
+ // routing definition.
+ //
+ // This field is mutable. The implementation of
+ // NodeConfigurationChanged() must adjust the routes accordingly.
+ UseSingleClusterRoute bool
+
+ // EnableEncapsulation enables use of encapsulation in communication
+ // between nodes.
+ //
+ // This field is immutable at runtime. The value will not change in
+ // subsequent calls to NodeConfigurationChanged().
+ EnableEncapsulation bool
+
+ // EnableAutoDirectRouting enables the use of direct routes for
+ // communication between nodes if two nodes have direct L2
+ // connectivity.
+ //
+ // EnableAutoDirectRouting must be compatible with EnableEncapsulation
+ // and must provide a fallback to use encapsulation if direct routing
+ // is not feasible and encapsulation is enabled.
+ //
+ // This field is immutable at runtime. The value will not change in
+ // subsequent calls to NodeConfigurationChanged().
+ EnableAutoDirectRouting bool
+
+ // EnableLocalNodeRoute enables installation of the route which points
+ // the allocation prefix of the local node. Disabling this option is
+ // useful when another component is responsible for the routing of the
+ // allocation CIDR IPs into Cilium endpoints.
+ EnableLocalNodeRoute bool
+
+ // EnableIPSec enables IPSec routes
+ EnableIPSec bool
+
+ // EncryptNode enables encrypting NodeIP traffic requires EnableIPSec
+ EncryptNode bool
+
+ // IPv4PodSubnets is a list of IPv4 subnets that pod IPs are assigned from
+ // these are then used when encryption is enabled to configure the node
+ // for encryption over these subnets at node initialization.
+ IPv4PodSubnets []*net.IPNet
+
+ // IPv6PodSubnets is a list of IPv6 subnets that pod IPs are assigned from
+ // these are then used when encryption is enabled to configure the node
+ // for encryption over these subnets at node initialization.
+ IPv6PodSubnets []*net.IPNet
+}
+
+// NodeHandler handles node related events such as addition, update or deletion
+// of nodes or changes to the local node configuration.
+//
+// Node events apply to the local node as well as to remote nodes. The
+// implementation can differ between the own local node and remote nodes by
+// calling node.IsLocal().
+type NodeHandler interface {
+ // Name identifies the handler, this is used in logging/reporting handler
+ // reconciliation errors.
+ Name() string
+
+ // NodeAdd is called when a node is discovered for the first time.
+ NodeAdd(newNode nodeTypes.Node) error
+
+ // NodeUpdate is called when a node definition changes. Both the old
+ // and new node definition is provided. NodeUpdate() is never called
+ // before NodeAdd() is called for a particular node.
+ NodeUpdate(oldNode, newNode nodeTypes.Node) error
+
+ // NodeDelete is called after a node has been deleted
+ NodeDelete(node nodeTypes.Node) error
+
+ // AllNodeValidateImplementation is called to validate the implementation
+ // of all nodes in the node cache.
+ AllNodeValidateImplementation()
+
+ // NodeValidateImplementation is called to validate the implementation of
+ // the node in the datapath. This function is intended to be run on an
+ // interval to ensure that the datapath is consistently converged.
+ NodeValidateImplementation(node nodeTypes.Node) error
+
+ // NodeConfigurationChanged is called when the local node configuration
+ // has changed
+ NodeConfigurationChanged(config LocalNodeConfiguration) error
+}
+
+type NodeNeighbors interface {
+ // NodeNeighDiscoveryEnabled returns whether node neighbor discovery is enabled
+ NodeNeighDiscoveryEnabled() bool
+
+ // NodeNeighborRefresh is called to refresh node neighbor table
+ NodeNeighborRefresh(ctx context.Context, node nodeTypes.Node)
+
+ // NodeCleanNeighbors cleans all neighbor entries for the direct routing device
+ // and the encrypt interface.
+ NodeCleanNeighbors(migrateOnly bool)
+}
+
+type NodeIDHandler interface {
+ // GetNodeIP returns the string node IP that was previously registered as the given node ID.
+ GetNodeIP(uint16) string
+
+ // GetNodeID gets the node ID for the given node IP. If none is found, exists is false.
+ GetNodeID(nodeIP net.IP) (nodeID uint16, exists bool)
+
+ // DumpNodeIDs returns all node IDs and their associated IP addresses.
+ DumpNodeIDs() []*models.NodeID
+
+ // RestoreNodeIDs restores node IDs and their associated IP addresses from the
+ // BPF map and into the node handler in-memory copy.
+ RestoreNodeIDs()
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/types/node_addressing.go b/vendor/github.com/cilium/cilium/pkg/datapath/types/node_addressing.go
new file mode 100644
index 000000000..b4733fe69
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/datapath/types/node_addressing.go
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package types
+
+import (
+ "net"
+
+ "github.com/cilium/cilium/pkg/cidr"
+)
+
+// NodeAddressingFamily is the node addressing information for a particular
+// address family
+type NodeAddressingFamily interface {
+ // Router is the address that will act as the router on each node where
+ // an agent is running on. Endpoints have a default route that points
+ // to this address.
+ Router() net.IP
+
+ // PrimaryExternal is the primary external address of the node. Nodes
+ // must be able to reach each other via this address.
+ PrimaryExternal() net.IP
+
+ // AllocationCIDR is the CIDR used for IP allocation of all endpoints
+ // on the node
+ AllocationCIDR() *cidr.CIDR
+
+ // LocalAddresses lists all local addresses
+ LocalAddresses() ([]net.IP, error)
+
+ // LoadBalancerNodeAddresses lists all addresses on which HostPort and
+ // NodePort services should be responded to
+ LoadBalancerNodeAddresses() []net.IP
+}
+
+// NodeAddressing implements addressing of a node
+type NodeAddressing interface {
+ IPv6() NodeAddressingFamily
+ IPv4() NodeAddressingFamily
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/types/wireguard.go b/vendor/github.com/cilium/cilium/pkg/datapath/types/wireguard.go
new file mode 100644
index 000000000..46aa3ca2d
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/datapath/types/wireguard.go
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package types
+
+import (
+ "net"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+// WireguardAgent manages the WireGuard peers
+type WireguardAgent interface {
+ UpdatePeer(nodeName, pubKeyHex string, nodeIPv4, nodeIPv6 net.IP) error
+ DeletePeer(nodeName string) error
+ Status(includePeers bool) (*models.WireguardStatus, error)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/debug/subsystem.go b/vendor/github.com/cilium/cilium/pkg/debug/subsystem.go
new file mode 100644
index 000000000..62162e364
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/debug/subsystem.go
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package debug
+
+import (
+ "fmt"
+
+ "github.com/cilium/cilium/pkg/lock"
+)
+
+// StatusFunc is a function returning the debug status of a subsystem. It is
+// passed into RegisterStatusFunc().
+type StatusFunc func() string
+
+// StatusMap is the collection of debug status of all subsystems. The key is
+// the subsystem name. The value is the subsystem debug status.
+type StatusMap map[string]string
+
+// StatusObject is the interface an object must implement to be able to be
+// passed into RegisterStatusObject().
+type StatusObject interface {
+ // DebugStatus() is the equivalent of StatusFunc. It must return the
+ // debug status as a string.
+ DebugStatus() string
+}
+
+type functionMap map[string]StatusFunc
+
+type statusFunctions struct {
+ functions functionMap
+ mutex lock.RWMutex
+}
+
+func newStatusFunctions() statusFunctions {
+ return statusFunctions{
+ functions: functionMap{},
+ }
+}
+
+func (s *statusFunctions) register(name string, fn StatusFunc) error {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+
+ if _, ok := s.functions[name]; ok {
+ return fmt.Errorf("subsystem already registered")
+ }
+
+ s.functions[name] = fn
+
+ return nil
+}
+
+func (s *statusFunctions) registerStatusObject(name string, obj StatusObject) error {
+ return s.register(name, func() string { return obj.DebugStatus() })
+}
+
+func (s *statusFunctions) collectStatus() StatusMap {
+ fnCopy := functionMap{}
+
+ // Make a copy to not hold the mutex while collecting the status
+ s.mutex.RLock()
+ for name, fn := range s.functions {
+ fnCopy[name] = fn
+ }
+ s.mutex.RUnlock()
+
+ status := StatusMap{}
+
+ for name, fn := range fnCopy {
+ status[name] = fn()
+ }
+
+ return status
+}
+
+var globalStatusFunctions = newStatusFunctions()
+
+// RegisterStatusFunc registers a subsystem and associates a status function to
+// call for debug status collection
+func RegisterStatusFunc(name string, fn StatusFunc) error {
+ return globalStatusFunctions.register(name, fn)
+}
+
+// RegisterStatusObject registers a subsystem and associates a status object on
+// which DebugStatus() is called to collect debug status
+func RegisterStatusObject(name string, obj StatusObject) error {
+ return globalStatusFunctions.registerStatusObject(name, obj)
+}
+
+// CollectSubsystemStatus collects the status of all subsystems and returns it
+func CollectSubsystemStatus() StatusMap {
+ return globalStatusFunctions.collectStatus()
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/defaults/defaults.go b/vendor/github.com/cilium/cilium/pkg/defaults/defaults.go
index 1258c1af8..22f438024 100644
--- a/vendor/github.com/cilium/cilium/pkg/defaults/defaults.go
+++ b/vendor/github.com/cilium/cilium/pkg/defaults/defaults.go
@@ -86,6 +86,15 @@ const (
// HubbleRecorderSinkQueueSize is the queue size for each recorder sink
HubbleRecorderSinkQueueSize = 1024
+ // HubbleRedactEnabled controls if sensitive information will be redacted from L7 flows
+ HubbleRedactEnabled = false
+
+ // HubbleRedactHttpURLQuery controls if the URL query will be redacted from flows
+ HubbleRedactHttpURLQuery = false
+
+ // HubbleRedactKafkaApiKey controls if the Kafka API key will be redacted from flows
+ HubbleRedactKafkaApiKey = false
+
// MonitorSockPath1_2 is the path to the UNIX domain socket used to
// distribute BPF and agent events to listeners.
// This is the 1.2 protocol version.
@@ -207,9 +216,6 @@ const (
// EnableHostLegacyRouting is the default value for using the old routing path via stack.
EnableHostLegacyRouting = false
- // K8sEnableEndpointSlice is the default value for k8s EndpointSlice feature.
- K8sEnableEndpointSlice = true
-
// PreAllocateMaps is the default value for BPF map preallocation
PreAllocateMaps = true
@@ -274,6 +280,10 @@ const (
// EnableHealthCheckNodePort
EnableHealthCheckNodePort = true
+ // EnableHealthCheckLoadBalancerIP is the default value for
+ // EnableHealthCheckLoadBalancerIP
+ EnableHealthCheckLoadBalancerIP = false
+
// AlignCheckerName is the BPF object name for the alignchecker.
AlignCheckerName = "bpf_alignchecker.o"
@@ -414,16 +424,6 @@ const (
// IPAMAPIQPSLimit is the default QPS limit when rate limiting access to external APIs
IPAMAPIQPSLimit = 4.0
- // IPAMPodCIDRAllocationThreshold is the default value for
- // CiliumNode.Spec.IPAM.PodCIDRAllocationThreshold if no value is set
- // Defaults to 8, which is similar to IPAMPreAllocation
- IPAMPodCIDRAllocationThreshold = 8
-
- // IPAMPodCIDRReleaseThreshold is the default value for
- // CiliumNode.Spec.IPAM.PodCIDRReleaseThreshold if no value is set
- // Defaults to 16, which is 2x the allocation threshold to avoid flapping
- IPAMPodCIDRReleaseThreshold = 16
-
// AutoCreateCiliumNodeResource enables automatic creation of a
// CiliumNode resource for the local node
AutoCreateCiliumNodeResource = true
@@ -502,10 +502,10 @@ const (
// InstallNoConntrackRules instructs Cilium to install Iptables rules to skip netfilter connection tracking on all pod traffic.
InstallNoConntrackIptRules = false
- // WireguardSubnetV4 is a default wireguard tunnel subnet
+ // WireguardSubnetV4 is a default WireGuard tunnel subnet
WireguardSubnetV4 = "172.16.43.0/24"
- // WireguardSubnetV6 is a default wireguard tunnel subnet
+ // WireguardSubnetV6 is a default WireGuard tunnel subnet
WireguardSubnetV6 = "fdc9:281f:04d7:9ee9::1/64"
// ExternalClusterIP enables cluster external access to ClusterIP services.
@@ -573,4 +573,6 @@ var (
"cilium_lb6_source_range": "enabled,128,0",
"cilium_lb6_affinity_match": "enabled,128,0",
}
+
+ PolicyCIDRMatchMode = []string{}
)
diff --git a/vendor/github.com/cilium/cilium/pkg/defaults/node.go b/vendor/github.com/cilium/cilium/pkg/defaults/node.go
index facc7dec8..4cfeef002 100644
--- a/vendor/github.com/cilium/cilium/pkg/defaults/node.go
+++ b/vendor/github.com/cilium/cilium/pkg/defaults/node.go
@@ -23,6 +23,18 @@ const (
// SecondHostDevice is the name of the second interface of the host veth pair.
SecondHostDevice = "cilium_net"
+ // IPIPv4Device is a device of type 'ipip', created by the agent.
+ IPIPv4Device = "cilium_ipip4"
+
+ // IPIPv6Device is a device of type 'ip6tnl', created by the agent.
+ IPIPv6Device = "cilium_ipip6"
+
+ // GeneveDevice is a device of type 'geneve', created by the agent.
+ GeneveDevice = "cilium_geneve"
+
+ // VxlanDevice is a device of type 'vxlan', created by the agent.
+ VxlanDevice = "cilium_vxlan"
+
// CiliumK8sAnnotationPrefix is the prefix key for the annotations used in kubernetes.
CiliumK8sAnnotationPrefix = "cilium.io/"
@@ -52,4 +64,13 @@ var (
// IPv4DefaultRoute is the default IPv4 route.
IPv4DefaultRoute = net.IPNet{IP: net.IPv4zero, Mask: net.CIDRMask(0, 32)}
+
+ // ExcludedDevicePrefixes are prefixes that we don't consider during automatic device detection.
+ ExcludedDevicePrefixes = []string{
+ "cilium_",
+ "lo",
+ "lxc",
+ "cni",
+ "docker",
+ }
)
diff --git a/vendor/github.com/cilium/cilium/pkg/endpoint/id/id.go b/vendor/github.com/cilium/cilium/pkg/endpoint/id/id.go
new file mode 100644
index 000000000..a785e2d0f
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/endpoint/id/id.go
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package id
+
+import (
+ "fmt"
+ "math"
+ "net/netip"
+ "strconv"
+ "strings"
+)
+
+// MaxEndpointID is the maximum endpoint identifier.
+const MaxEndpointID = math.MaxUint16
+
+// PrefixType describes the type of endpoint identifier
+type PrefixType string
+
+func (s PrefixType) String() string { return string(s) }
+
+const (
+ // CiliumLocalIdPrefix is a numeric identifier with local scope. It has
+ // no cluster wide meaning and is only unique in the scope of a single
+ // agent. An endpoint is guaranteed to always have a local scope identifier.
+ CiliumLocalIdPrefix PrefixType = "cilium-local"
+
+ // CiliumGlobalIdPrefix is an endpoint identifier with global scope.
+ // This addressing mechanism is currently unused.
+ CiliumGlobalIdPrefix PrefixType = "cilium-global"
+
+ // ContainerIdPrefix is used to address an endpoint via its primary
+ // container ID. The container ID is specific to the container runtime
+ // in use. Only the primary container that defines the networking scope
+ // can be used to address an endpoint.
+ // This can only be used to look up endpoints which have not opted-out of
+ // legacy identifiers.
+ // Deprecated. Use CNIAttachmentIdPrefix instead
+ ContainerIdPrefix PrefixType = "container-id"
+
+ // CNIAttachmentIdPrefix is used to address an endpoint via its primary
+ // container ID and container interface passed to the CNI plugin.
+ // This attachment ID uniquely identifies a CNI ADD and CNI DEL invocation pair.
+ CNIAttachmentIdPrefix PrefixType = "cni-attachment-id"
+
+ // DockerEndpointPrefix is used to address an endpoint via the Docker
+ // endpoint ID. This method is only possible if the endpoint was
+ // created via the cilium-docker plugin and the container is backed by
+ // the libnetwork abstraction.
+ DockerEndpointPrefix PrefixType = "docker-endpoint"
+
+ // ContainerNamePrefix is used to address the endpoint via the
+ // container's name. This addressing mechanism depends on the container
+ // runtime. Only the primary container that defines the networking scope
+ // can be used to address an endpoint.
+ // This can only be used to look up endpoints which have not opted-out of
+ // legacy identifiers.
+ // Deprecated. Use CNIAttachmentIdPrefix instead
+ ContainerNamePrefix PrefixType = "container-name"
+
+ // CEPNamePrefix is used to address an endpoint via its Kubernetes
+ // CiliumEndpoint resource name. This addressing only works if the endpoint
+ // is represented as a Kubernetes CiliumEndpoint resource.
+ CEPNamePrefix PrefixType = "cep-name"
+
+ // PodNamePrefix is used to address an endpoint via the Kubernetes pod
+ // name. This addressing only works if the endpoint is represented as a
+ // Kubernetes pod.
+ // This can only be used to look up endpoints which have not opted-out of
+ // legacy identifiers.
+ // Deprecated. May not be unique. Use CEPNamePrefix instead.
+ PodNamePrefix PrefixType = "pod-name"
+
+ // IPv4Prefix is used to address an endpoint via the endpoint's IPv4
+ // address.
+ IPv4Prefix PrefixType = "ipv4"
+
+ // IPv6Prefix is the prefix used to refer to an endpoint via IPv6 address
+ IPv6Prefix PrefixType = "ipv6"
+)
+
+// NewCiliumID returns a new endpoint identifier of type CiliumLocalIdPrefix
+func NewCiliumID(id int64) string {
+ return NewID(CiliumLocalIdPrefix, strconv.FormatInt(id, 10))
+}
+
+// NewID returns a new endpoint identifier
+func NewID(prefix PrefixType, id string) string {
+ return string(prefix) + ":" + id
+}
+
+// NewIPPrefixID returns an identifier based on the IP address specified. If ip
+// is invalid, an empty string is returned.
+func NewIPPrefixID(ip netip.Addr) string {
+ if ip.IsValid() {
+ if ip.Is4() {
+ return NewID(IPv4Prefix, ip.String())
+ }
+ return NewID(IPv6Prefix, ip.String())
+ }
+ return ""
+}
+
+// NewCNIAttachmentID returns an identifier based on the CNI attachment ID. If
+// the containerIfName is empty, only the containerID will be used.
+func NewCNIAttachmentID(containerID, containerIfName string) string {
+ id := containerID
+ if containerIfName != "" {
+ id = containerID + ":" + containerIfName
+ }
+ return NewID(CNIAttachmentIdPrefix, id)
+}
+
+// splitID splits ID into prefix and id. No validation is performed on prefix.
+func splitID(id string) (PrefixType, string) {
+ if idx := strings.IndexByte(id, ':'); idx > -1 {
+ return PrefixType(id[:idx]), id[idx+1:]
+ }
+
+ // default prefix
+ return CiliumLocalIdPrefix, id
+}
+
+// ParseCiliumID parses id as cilium endpoint id and returns numeric portion.
+func ParseCiliumID(id string) (int64, error) {
+ prefix, id := splitID(id)
+ if prefix != CiliumLocalIdPrefix {
+ return 0, fmt.Errorf("not a cilium identifier")
+ }
+ n, err := strconv.ParseInt(id, 0, 64)
+ if err != nil || n < 0 {
+ return 0, fmt.Errorf("invalid numeric cilium id: %s", err)
+ }
+ if n > MaxEndpointID {
+ return 0, fmt.Errorf("endpoint id too large: %d", n)
+ }
+ return n, nil
+}
+
+// Parse parses a string as an endpoint identifier consisting of an optional
+// prefix [prefix:] followed by the identifier.
+func Parse(id string) (PrefixType, string, error) {
+ prefix, id := splitID(id)
+ switch prefix {
+ case CiliumLocalIdPrefix,
+ CiliumGlobalIdPrefix,
+ CNIAttachmentIdPrefix,
+ ContainerIdPrefix,
+ DockerEndpointPrefix,
+ ContainerNamePrefix,
+ CEPNamePrefix,
+ PodNamePrefix,
+ IPv4Prefix,
+ IPv6Prefix:
+ return prefix, id, nil
+ }
+
+ return "", "", fmt.Errorf("unknown endpoint ID prefix \"%s\"", prefix)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/endpoint/id/identifiers.go b/vendor/github.com/cilium/cilium/pkg/endpoint/id/identifiers.go
new file mode 100644
index 000000000..d2deaa391
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/endpoint/id/identifiers.go
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package id
+
+// Identifiers is a collection of attributes that identify the Endpoint through
+// different systems. For examples of the type of Identifiers, see PrefixType.
+type Identifiers map[PrefixType]string
diff --git a/vendor/github.com/cilium/cilium/pkg/endpoint/regeneration/owner.go b/vendor/github.com/cilium/cilium/pkg/endpoint/regeneration/owner.go
new file mode 100644
index 000000000..6e45689cb
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/endpoint/regeneration/owner.go
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package regeneration
+
+import (
+ "context"
+
+ datapath "github.com/cilium/cilium/pkg/datapath/types"
+ "github.com/cilium/cilium/pkg/fqdn/restore"
+ "github.com/cilium/cilium/pkg/identity"
+ "github.com/cilium/cilium/pkg/lock"
+ monitorAPI "github.com/cilium/cilium/pkg/monitor/api"
+ "github.com/cilium/cilium/pkg/proxy/accesslog"
+)
+
+// Owner is the interface that defines the requirements for anybody owning policies.
+type Owner interface {
+ // QueueEndpointBuild puts the given endpoint in the processing queue
+ QueueEndpointBuild(ctx context.Context, epID uint64) (func(), error)
+
+ // GetCompilationLock returns the mutex responsible for synchronizing compilation
+ // of BPF programs.
+ GetCompilationLock() *lock.RWMutex
+
+ // GetCIDRPrefixLengths returns the sorted list of unique prefix lengths used
+ // by CIDR policies.
+ GetCIDRPrefixLengths() (s6, s4 []int)
+
+ // SendNotification is called to emit an agent notification
+ SendNotification(msg monitorAPI.AgentNotifyMessage) error
+
+ // Datapath returns a reference to the datapath implementation.
+ Datapath() datapath.Datapath
+
+ // GetDNSRules creates a fresh copy of DNS rules that can be used when
+ // endpoint is restored on a restart.
+ // The endpoint lock must not be held while calling this function.
+ GetDNSRules(epID uint16) restore.DNSRules
+
+ // RemoveRestoredDNSRules removes any restored DNS rules for
+ // this endpoint from the DNS proxy.
+ RemoveRestoredDNSRules(epID uint16)
+}
+
+// EndpointInfoSource returns information about an endpoint being proxied.
+// The read lock must be held when calling any method.
+type EndpointInfoSource interface {
+ GetID() uint64
+ GetIPv4Address() string
+ GetIPv6Address() string
+ GetIdentity() identity.NumericIdentity
+ GetLabels() []string
+ HasSidecarProxy() bool
+ ConntrackName() string
+ ConntrackNameLocked() string
+}
+
+// EndpointUpdater returns information about an endpoint being proxied and
+// is called back to update the endpoint when proxy events occur.
+// This is a subset of `Endpoint`.
+type EndpointUpdater interface {
+ EndpointInfoSource
+ // OnProxyPolicyUpdate is called when the proxy acknowledges that it
+ // has applied a policy.
+ OnProxyPolicyUpdate(policyRevision uint64)
+
+ // UpdateProxyStatistics updates the Endpoint's proxy statistics to account
+ // for a new observed flow with the given characteristics.
+ UpdateProxyStatistics(proxyType, l4Protocol string, port uint16, ingress, request bool, verdict accesslog.FlowVerdict)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/endpoint/regeneration/regeneration_context.go b/vendor/github.com/cilium/cilium/pkg/endpoint/regeneration/regeneration_context.go
new file mode 100644
index 000000000..84cdc63de
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/endpoint/regeneration/regeneration_context.go
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package regeneration
+
+import (
+ "context"
+)
+
+// DatapathRegenerationLevel determines what is expected of the datapath when
+// a regeneration event is processed.
+type DatapathRegenerationLevel int
+
+const (
+ // Invalid is the default level to enforce explicit setting of
+ // the regeneration level.
+ Invalid DatapathRegenerationLevel = iota
+ // RegenerateWithoutDatapath indicates that datapath rebuild or reload
+ // is not required to implement this regeneration.
+ RegenerateWithoutDatapath
+ // RegenerateWithDatapathLoad indicates that the datapath must be
+ // reloaded but not recompiled to implement this regeneration.
+ RegenerateWithDatapathLoad
+ // RegenerateWithDatapathRewrite indicates that the datapath must be
+ // recompiled and reloaded to implement this regeneration.
+ RegenerateWithDatapathRewrite
+ // RegenerateWithDatapathRebuild indicates that the datapath must be
+ // fully recompiled and reloaded without using any cached templates.
+ RegenerateWithDatapathRebuild
+)
+
+// String converts a DatapathRegenerationLevel into a human-readable string.
+func (r DatapathRegenerationLevel) String() string {
+ switch r {
+ case Invalid:
+ return "invalid"
+ case RegenerateWithoutDatapath:
+ return "no-rebuild"
+ case RegenerateWithDatapathLoad:
+ return "reload"
+ case RegenerateWithDatapathRewrite:
+ return "rewrite+load"
+ case RegenerateWithDatapathRebuild:
+ return "compile+load"
+ default:
+ break
+ }
+ return "BUG: Unknown DatapathRegenerationLevel"
+}
+
+// ExternalRegenerationMetadata contains any information about a regeneration that
+// the endpoint subsystem should be made aware of for a given endpoint.
+type ExternalRegenerationMetadata struct {
+ // Reason provides context to source for the regeneration, which is
+ // used to generate useful log messages.
+ Reason string
+
+ // RegenerationLevel forces datapath regeneration according to the
+ // levels defined in the DatapathRegenerationLevel description.
+ RegenerationLevel DatapathRegenerationLevel
+
+ ParentContext context.Context
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/eventqueue/doc.go b/vendor/github.com/cilium/cilium/pkg/eventqueue/doc.go
new file mode 100644
index 000000000..62b82c19d
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/eventqueue/doc.go
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Package eventqueue implements a queue-based system for event processing in a
+// generic fashion in a first-in, first-out manner.
+package eventqueue
diff --git a/vendor/github.com/cilium/cilium/pkg/eventqueue/eventqueue.go b/vendor/github.com/cilium/cilium/pkg/eventqueue/eventqueue.go
new file mode 100644
index 000000000..e4bd753c7
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/eventqueue/eventqueue.go
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package eventqueue
+
+import (
+ "fmt"
+ "reflect"
+ "sync"
+ "sync/atomic"
+
+ "github.com/sirupsen/logrus"
+
+ "github.com/cilium/cilium/pkg/lock"
+ "github.com/cilium/cilium/pkg/logging"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+ "github.com/cilium/cilium/pkg/option"
+ "github.com/cilium/cilium/pkg/spanstat"
+)
+
+var (
+ log = logging.DefaultLogger.WithField(logfields.LogSubsys, "eventqueue")
+)
+
+// EventQueue is a structure which is utilized to handle Events in a first-in,
+// first-out order. An EventQueue may be closed, in which case all events which
+// are queued up, but have not been processed yet, will be cancelled (i.e., not
+// run). It is guaranteed that no events will be scheduled onto an EventQueue
+// after it has been closed; if any event is attempted to be scheduled onto an
+// EventQueue after it has been closed, it will be cancelled immediately. For
+// any event to be processed by the EventQueue, it must implement the
+// `EventHandler` interface. This allows for different types of events to be
+// processed by anything which chooses to utilize an `EventQueue`.
+type EventQueue struct {
+ // events represents the queue of events. This should always be a buffered
+ // channel.
+ events chan *Event
+
+ // close is closed once the EventQueue has been closed.
+ close chan struct{}
+
+ // drain is closed when the EventQueue is stopped. Any Event which is
+ // Enqueued after this channel is closed will be cancelled / not processed
+ // by the queue. If an Event has been Enqueued, but has not been processed
+ // before this channel is closed, it will be cancelled and not processed
+ // as well.
+ drain chan struct{}
+
+ // eventQueueOnce is used to ensure that the EventQueue business logic can
+ // only be run once.
+ eventQueueOnce sync.Once
+
+ // closeOnce is used to ensure that the EventQueue can only be closed once.
+ closeOnce sync.Once
+
+ // name is used to differentiate this EventQueue from other EventQueues that
+ // are also running in logs
+ name string
+
+ eventsMu lock.RWMutex
+
+ // eventsClosed is a channel that's closed when the event loop (Run())
+ // terminates.
+ eventsClosed chan struct{}
+}
+
+// NewEventQueue returns an EventQueue with a capacity for only one event at
+// a time.
+func NewEventQueue() *EventQueue {
+ return NewEventQueueBuffered("", 1)
+}
+
+// NewEventQueueBuffered returns an EventQueue with a capacity of,
+// numBufferedEvents at a time, and all other needed fields initialized.
+func NewEventQueueBuffered(name string, numBufferedEvents int) *EventQueue {
+ log.WithFields(logrus.Fields{
+ "name": name,
+ "numBufferedEvents": numBufferedEvents,
+ }).Debug("creating new EventQueue")
+ return &EventQueue{
+ name: name,
+ // Up to numBufferedEvents can be Enqueued until Enqueueing blocks.
+ events: make(chan *Event, numBufferedEvents),
+ close: make(chan struct{}),
+ drain: make(chan struct{}),
+ eventsClosed: make(chan struct{}),
+ }
+}
+
+// Enqueue pushes the given event onto the EventQueue. If the queue has been
+// stopped, the Event will not be enqueued, and its cancel channel will be
+// closed, indicating that the Event was not run. This function may block if
+// the queue is at its capacity for events. If a single Event has Enqueue
+// called on it multiple times asynchronously, there is no guarantee as to
+// which one will return the channel which passes results back to the caller.
+// It is up to the caller to check whether the returned channel is nil, as
+// waiting to receive on such a channel will block forever. Returns an error
+// if the Event has been previously enqueued, if the Event is nil, or the queue
+// itself is not initialized properly.
+func (q *EventQueue) Enqueue(ev *Event) (<-chan interface{}, error) {
+ if q.notSafeToAccess() || ev == nil {
+ return nil, fmt.Errorf("unable to Enqueue event")
+ }
+
+ // Events can only be enqueued once.
+ if !ev.enqueued.CompareAndSwap(false, true) {
+ return nil, fmt.Errorf("unable to Enqueue event; event has already had Enqueue called on it")
+ }
+
+ // Multiple Enqueues can occur at the same time. Ensure that events channel
+ // is not closed while we are enqueueing events.
+ q.eventsMu.RLock()
+ defer q.eventsMu.RUnlock()
+
+ select {
+ // The event should be drained from the queue (e.g., it should not be
+ // processed).
+ case <-q.drain:
+ // Closed eventResults channel signifies cancellation.
+ close(ev.cancelled)
+ close(ev.eventResults)
+
+ return ev.eventResults, nil
+ default:
+ // The events channel may be closed even if an event has been pushed
+ // onto the events channel, as events are consumed off of the events
+ // channel asynchronously! If the EventQueue is closed before this
+ // event is processed, then it will be cancelled.
+
+ ev.stats.waitEnqueue.Start()
+ ev.stats.waitConsumeOffQueue.Start()
+ q.events <- ev
+ ev.stats.waitEnqueue.End(true)
+ return ev.eventResults, nil
+ }
+}
+
+// Event is an event that can be enqueued onto an EventQueue.
+type Event struct {
+ // Metadata is the information about the event which is sent
+ // by its queuer. Metadata must implement the EventHandler interface in
+ // order for the Event to be successfully processed by the EventQueue.
+ Metadata EventHandler
+
+ // eventResults is a channel on which the results of the event are sent.
+ // It is populated by the EventQueue itself, not by the queuer. This channel
+ // is closed if the event is cancelled.
+ eventResults chan interface{}
+
+ // cancelled signals that the given Event was not ran. This can happen
+ // if the EventQueue processing this Event was closed before the Event was
+ // Enqueued onto the Event queue, or if the Event was Enqueued onto an
+ // EventQueue, and the EventQueue on which the Event was scheduled was
+ // closed.
+ cancelled chan struct{}
+
+ // stats is a field which contains information about when this event is
+ // enqueued, dequeued, etc.
+ stats eventStatistics
+
+ // enqueued specifies whether this event has been enqueued on an EventQueue.
+ enqueued atomic.Bool
+}
+
+type eventStatistics struct {
+
+ // waitEnqueue shows how long a given event was waiting on the queue before
+ // it was actually processed.
+ waitEnqueue spanstat.SpanStat
+
+ // durationStat shows how long the actual processing of the event took. This
+ // is the time for how long Handle() takes for the event.
+ durationStat spanstat.SpanStat
+
+ // waitConsumeOffQueue shows how long it took for the event to be consumed
+ // plus the time the event waited in the queue.
+ waitConsumeOffQueue spanstat.SpanStat
+}
+
+// NewEvent returns an Event with all fields initialized.
+func NewEvent(meta EventHandler) *Event {
+ return &Event{
+ Metadata: meta,
+ eventResults: make(chan interface{}, 1),
+ cancelled: make(chan struct{}),
+ stats: eventStatistics{},
+ }
+}
+
+// WasCancelled returns whether the cancelled channel for the given Event has
+// been closed or not. Cancellation occurs if the event was not processed yet
+// by an EventQueue onto which this Event was Enqueued, and the queue is closed,
+// or if the event was attempted to be scheduled onto an EventQueue which has
+// already been closed.
+func (ev *Event) WasCancelled() bool {
+ select {
+ case <-ev.cancelled:
+ return true
+ default:
+ return false
+ }
+}
+
+func (ev *Event) printStats(q *EventQueue) {
+ if option.Config.Debug {
+ q.getLogger().WithFields(logrus.Fields{
+ "eventType": reflect.TypeOf(ev.Metadata).String(),
+ "eventHandlingDuration": ev.stats.durationStat.Total(),
+ "eventEnqueueWaitTime": ev.stats.waitEnqueue.Total(),
+ "eventConsumeOffQueueWaitTime": ev.stats.waitConsumeOffQueue.Total(),
+ }).Debug("EventQueue event processing statistics")
+ }
+}
+
+// Run consumes events that have been queued for this EventQueue. It
+// is presumed that the eventQueue is a buffered channel with a length of one
+// (i.e., only one event can be processed at a time). All business logic for
+// handling queued events is contained within this function. The events in the
+// queue must implement the EventHandler interface. If the event queue is
+// closed, then all events which were queued up, but not processed, are
+// cancelled; any event which is currently being processed will not be
+// cancelled.
+func (q *EventQueue) Run() {
+ if q.notSafeToAccess() {
+ return
+ }
+
+ go q.run()
+}
+
+func (q *EventQueue) run() {
+ q.eventQueueOnce.Do(func() {
+ defer close(q.eventsClosed)
+ for ev := range q.events {
+ select {
+ case <-q.drain:
+ ev.stats.waitConsumeOffQueue.End(false)
+ close(ev.cancelled)
+ close(ev.eventResults)
+ ev.printStats(q)
+ default:
+ ev.stats.waitConsumeOffQueue.End(true)
+ ev.stats.durationStat.Start()
+ ev.Metadata.Handle(ev.eventResults)
+ // Always indicate success for now.
+ ev.stats.durationStat.End(true)
+ // Ensures that no more results can be sent as the event has
+ // already been processed.
+ ev.printStats(q)
+ close(ev.eventResults)
+ }
+ }
+ })
+}
+
+func (q *EventQueue) notSafeToAccess() bool {
+ return q == nil || q.close == nil || q.drain == nil || q.events == nil
+}
+
+// Stop stops any further events from being processed by the EventQueue. Any
+// event which is currently being processed by the EventQueue will continue to
+// run. All other events waiting to be processed, and all events that may be
+// enqueued will not be processed by the event queue; they will be cancelled.
+// If the queue has already been stopped, this is a no-op.
+func (q *EventQueue) Stop() {
+ if q.notSafeToAccess() {
+ return
+ }
+
+ q.closeOnce.Do(func() {
+ q.getLogger().Debug("stopping EventQueue")
+ // Any event that is sent to the queue at this point will be cancelled
+ // immediately in Enqueue().
+ close(q.drain)
+
+ // Signal that the queue has been drained.
+ close(q.close)
+
+ q.eventsMu.Lock()
+ close(q.events)
+ q.eventsMu.Unlock()
+ })
+}
+
+// WaitToBeDrained returns the channel which waits for the EventQueue to have been
+// stopped. This allows for queuers to ensure that all events in the queue have
+// been processed or cancelled. If the queue is nil, returns immediately.
+func (q *EventQueue) WaitToBeDrained() {
+ if q == nil {
+ return
+ }
+ <-q.close
+
+ // If the queue is running, then in-flight events may still be ongoing.
+ // Wait for them to be completed for the queue to be fully drained. If the
+ // queue is not running, we must forcefully run it because nothing else
+ // will so that it can be drained.
+ go q.run()
+ <-q.eventsClosed
+}
+
+func (q *EventQueue) getLogger() *logrus.Entry {
+ return log.WithFields(
+ logrus.Fields{
+ "name": q.name,
+ })
+}
+
+// EventHandler is an interface for allowing an EventQueue to handle events
+// in a generic way. To be processed by the EventQueue, all event types must
+// implement any function specified in this interface.
+type EventHandler interface {
+ Handle(chan interface{})
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/fqdn/dns/dns.go b/vendor/github.com/cilium/cilium/pkg/fqdn/dns/dns.go
new file mode 100644
index 000000000..1c089a785
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/fqdn/dns/dns.go
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Based on code from github.com/miekg/dns which is:
+//
+// Copyright 2009 The Go Authors. All rights reserved.
+// Copyright 2011 Miek Gieben. All rights reserved.
+// Copyright 2014 CloudFlare. All rights reserved.
+
+package dns
+
+import "strings"
+
+// These functions were copied and adapted from github.com/miekg/dns.
+
+// isFQDN reports whether the domain name s is fully qualified.
+func isFQDN(s string) bool {
+ s2 := strings.TrimSuffix(s, ".")
+ if s == s2 {
+ return false
+ }
+
+ i := strings.LastIndexFunc(s2, func(r rune) bool {
+ return r != '\\'
+ })
+
+ // Test whether we have an even number of escape sequences before
+ // the dot or none.
+ return (len(s2)-i)%2 != 0
+}
+
+// FQDN returns the fully qualified domain name from s.
+// If s is already fully qualified, it behaves as the identity function.
+func FQDN(s string) string {
+ if isFQDN(s) {
+ return strings.ToLower(s)
+ }
+ return strings.ToLower(s) + "."
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/fqdn/matchpattern/matchpattern.go b/vendor/github.com/cilium/cilium/pkg/fqdn/matchpattern/matchpattern.go
new file mode 100644
index 000000000..68e083175
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/fqdn/matchpattern/matchpattern.go
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package matchpattern
+
+import (
+ "errors"
+ "regexp"
+ "strings"
+
+ "github.com/cilium/cilium/pkg/fqdn/dns"
+ "github.com/cilium/cilium/pkg/fqdn/re"
+)
+
+const allowedDNSCharsREGroup = "[-a-zA-Z0-9_]"
+
+// MatchAllAnchoredPattern is the simplest pattern that matches all inputs. The resulting
+// parsed regular expression is the same as an empty string regex (""), but this
+// value is easier to reason about when serializing to and from json.
+const MatchAllAnchoredPattern = "(?:)"
+
+// MatchAllUnAnchoredPattern is the same as MatchAllAnchoredPattern, except that
+// it can be or-ed (joined with "|") with other rules, and still match all rules.
+const MatchAllUnAnchoredPattern = ".*"
+
+// Validate ensures that pattern is a parseable matchPattern. It returns the
+// regexp generated when validating.
+func Validate(pattern string) (matcher *regexp.Regexp, err error) {
+ if err := prevalidate(pattern); err != nil {
+ return nil, err
+ }
+ return re.CompileRegex(ToAnchoredRegexp(pattern))
+}
+
+// ValidateWithoutCache is the same as Validate() but doesn't consult the regex
+// LRU.
+func ValidateWithoutCache(pattern string) (matcher *regexp.Regexp, err error) {
+ if err := prevalidate(pattern); err != nil {
+ return nil, err
+ }
+ return regexp.Compile(ToAnchoredRegexp(pattern))
+}
+
+func prevalidate(pattern string) error {
+ pattern = strings.TrimSpace(pattern)
+ pattern = strings.ToLower(pattern)
+
+ // error check
+ if strings.ContainsAny(pattern, "[]+{},") {
+ return errors.New(`Only alphanumeric ASCII characters, the hyphen "-", underscore "_", "." and "*" are allowed in a matchPattern`)
+ }
+
+ return nil
+}
+
+// Sanitize canonicalizes the pattern for use by ToAnchoredRegexp
+func Sanitize(pattern string) string {
+ if pattern == "*" {
+ return pattern
+ }
+
+ return dns.FQDN(pattern)
+}
+
+// ToAnchoredRegexp converts a MatchPattern field into a regexp string. It does not
+// validate the pattern. It also adds anchors to ensure it matches the whole string.
+// It supports:
+// * to select 0 or more DNS valid characters
+func ToAnchoredRegexp(pattern string) string {
+ pattern = strings.TrimSpace(pattern)
+ pattern = strings.ToLower(pattern)
+
+ // handle the * match-all case. This will filter down to the end.
+ if pattern == "*" {
+ return "(^(" + allowedDNSCharsREGroup + "+[.])+$)|(^[.]$)"
+ }
+
+ pattern = escapeRegexpCharacters(pattern)
+
+ // Anchor the match to require the whole string to match this expression
+ return "^" + pattern + "$"
+}
+
+// ToUnAnchoredRegexp converts a MatchPattern field into a regexp string. It does not
+// validate the pattern. It does not add regexp anchors.
+// It supports:
+// * to select 0 or more DNS valid characters
+func ToUnAnchoredRegexp(pattern string) string {
+ pattern = strings.TrimSpace(pattern)
+ pattern = strings.ToLower(pattern)
+ // handle the * match-all case. This will filter down to the end.
+ if pattern == "*" {
+ return MatchAllUnAnchoredPattern
+ }
+ pattern = escapeRegexpCharacters(pattern)
+ return pattern
+}
+
+func escapeRegexpCharacters(pattern string) string {
+ // base case. "." becomes a literal .
+ pattern = strings.Replace(pattern, ".", "[.]", -1)
+
+ // base case. * becomes .*, but only for DNS valid characters
+ // NOTE: this only works because the case above does not leave the *
+ pattern = strings.Replace(pattern, "*", allowedDNSCharsREGroup+"*", -1)
+ return pattern
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/fqdn/re/re.go b/vendor/github.com/cilium/cilium/pkg/fqdn/re/re.go
new file mode 100644
index 000000000..73dd06339
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/fqdn/re/re.go
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Package re provides a simple function to access compile regex objects for
+// the FQDN subsystem.
+package re
+
+import (
+ "errors"
+ "fmt"
+ "regexp"
+ "sync/atomic"
+
+ lru "github.com/golang/groupcache/lru"
+
+ "github.com/cilium/cilium/pkg/lock"
+ "github.com/cilium/cilium/pkg/logging"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+ "github.com/cilium/cilium/pkg/option"
+)
+
+var (
+ log = logging.DefaultLogger.WithField(logfields.LogSubsys, "fqdn/re")
+)
+
+// CompileRegex compiles a pattern p into a regex and returns the regex object.
+// The regex object will be cached by an LRU. If p has already been compiled
+// and cached, this function will return the cached regex object. If not
+// already cached, it will compile p into a regex object and cache it in the
+// LRU. This function will return an error if the LRU has not already been
+// initialized.
+func CompileRegex(p string) (*regexp.Regexp, error) {
+ lru := regexCompileLRU.Load()
+ if lru == nil {
+ return nil, errors.New("FQDN regex compilation LRU not yet initialized")
+ }
+ lru.Lock()
+ r, ok := lru.Get(p)
+ lru.Unlock()
+ if ok {
+ return r.(*regexp.Regexp), nil
+ }
+ n, err := regexp.Compile(p)
+ if err != nil {
+ return nil, fmt.Errorf("failed to compile regex: %w", err)
+ }
+ lru.Lock()
+ lru.Add(p, n)
+ lru.Unlock()
+ return n, nil
+}
+
+// InitRegexCompileLRU creates a new instance of the regex compilation LRU.
+func InitRegexCompileLRU(size int) error {
+ if size < 0 {
+ return fmt.Errorf("failed to initialize FQDN regex compilation LRU due to invalid size %d", size)
+ } else if size == 0 {
+ log.Warnf(
+ "FQDN regex compilation LRU size is unlimited, which can grow unbounded potentially consuming too much memory. Consider passing a maximum size via --%s.",
+ option.FQDNRegexCompileLRUSize)
+ }
+ regexCompileLRU.Store(&RegexCompileLRU{
+ Mutex: &lock.Mutex{},
+ Cache: lru.New(size),
+ })
+ return nil
+}
+
+// regexCompileLRU is the singleton instance of the LRU that's shared
+// throughout Cilium.
+var regexCompileLRU atomic.Pointer[RegexCompileLRU]
+
+// RegexCompileLRU is an LRU cache for storing compiled regex objects of FQDN
+// names or patterns, used in CiliumNetworkPolicy or
+// ClusterwideCiliumNetworkPolicy.
+type RegexCompileLRU struct {
+ // The lru package doesn't provide any concurrency guarantees so we must
+ // provide our own locking.
+ *lock.Mutex
+ *lru.Cache
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/fqdn/restore/restore.go b/vendor/github.com/cilium/cilium/pkg/fqdn/restore/restore.go
new file mode 100644
index 000000000..769d3adb5
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/fqdn/restore/restore.go
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Package restore provides data structures important to restoring
+// DNS proxy rules. It serves as a central source for these
+// structures.
+// Note that these are marshaled as JSON and any changes need to be compatible
+// across an upgrade!
+package restore
+
+import (
+ "sort"
+)
+
+// DNSRules contains IP-based DNS rules for a set of ports (e.g., 53)
+type DNSRules map[uint16]IPRules
+
+// IPRules is an unsorted collection of IPrules
+type IPRules []IPRule
+
+// IPRule stores the allowed destination IPs for DNS names matching a regex
+type IPRule struct {
+ Re RuleRegex
+ IPs map[string]struct{} // IPs, nil set is wildcard and allows all IPs!
+}
+
+// RuleRegex is a wrapper for a pointer to a string so that we can define marshalers for it.
+type RuleRegex struct {
+ Pattern *string
+}
+
+// Sort is only used for testing
+// Sorts in place, but returns IPRules for convenience
+func (r IPRules) Sort() IPRules {
+ sort.SliceStable(r, func(i, j int) bool {
+ if r[i].Re.Pattern != nil && r[j].Re.Pattern != nil {
+ return *r[i].Re.Pattern < *r[j].Re.Pattern
+ }
+ if r[i].Re.Pattern != nil {
+ return true
+ }
+ return false
+ })
+
+ return r
+}
+
+// Sort is only used for testing
+// Sorts in place, but returns DNSRules for convenience
+func (r DNSRules) Sort() DNSRules {
+ for port, ipRules := range r {
+ if len(ipRules) > 0 {
+ ipRules = ipRules.Sort()
+ r[port] = ipRules
+ }
+ }
+ return r
+}
+
+// UnmarshalText unmarshals text into a RuleRegex
+// This must have a pointer receiver, otherwise the RuleRegex remains empty.
+func (r *RuleRegex) UnmarshalText(b []byte) error {
+ pattern := string(b)
+ r.Pattern = &pattern
+ return nil
+}
+
+// MarshalText marshals RuleRegex as string
+func (r RuleRegex) MarshalText() ([]byte, error) {
+ if r.Pattern != nil {
+ return []byte(*r.Pattern), nil
+ }
+ return nil, nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/health/client/client.go b/vendor/github.com/cilium/cilium/pkg/health/client/client.go
new file mode 100644
index 000000000..dd943633b
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/health/client/client.go
@@ -0,0 +1,424 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package client
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "sort"
+ "strings"
+ "time"
+
+ runtime_client "github.com/go-openapi/runtime/client"
+ "github.com/go-openapi/strfmt"
+
+ clientapi "github.com/cilium/cilium/api/v1/health/client"
+ "github.com/cilium/cilium/api/v1/health/models"
+ "github.com/cilium/cilium/pkg/health/defaults"
+)
+
+type ConnectivityStatusType int
+
+const (
+ ipUnavailable = "Unavailable"
+
+ ConnStatusReachable ConnectivityStatusType = 0
+ ConnStatusUnreachable ConnectivityStatusType = 1
+ ConnStatusUnknown ConnectivityStatusType = 2
+)
+
+func (c ConnectivityStatusType) String() string {
+ switch c {
+ case ConnStatusReachable:
+ return "reachable"
+ case ConnStatusUnreachable:
+ return "unreachable"
+ default:
+ return "unknown"
+ }
+}
+
+// Client is a client for cilium health
+type Client struct {
+ clientapi.CiliumHealthAPI
+}
+
+func configureTransport(tr *http.Transport, proto, addr string) *http.Transport {
+ if tr == nil {
+ tr = &http.Transport{}
+ }
+
+ if proto == "unix" {
+ // No need for compression in local communications.
+ tr.DisableCompression = true
+ tr.Dial = func(_, _ string) (net.Conn, error) {
+ return net.Dial(proto, addr)
+ }
+ } else {
+ tr.Proxy = http.ProxyFromEnvironment
+ tr.Dial = (&net.Dialer{}).Dial
+ }
+
+ return tr
+}
+
+// NewDefaultClient creates a client with default parameters connecting to UNIX domain socket.
+func NewDefaultClient() (*Client, error) {
+ return NewClient("")
+}
+
+// NewClient creates a client for the given `host`.
+func NewClient(host string) (*Client, error) {
+ if host == "" {
+ // Check if environment variable points to socket
+ e := os.Getenv(defaults.SockPathEnv)
+ if e == "" {
+ // If unset, fall back to default value
+ e = defaults.SockPath
+ }
+ host = "unix://" + e
+ }
+ tmp := strings.SplitN(host, "://", 2)
+ if len(tmp) != 2 {
+ return nil, fmt.Errorf("invalid host format '%s'", host)
+ }
+
+ hostHeader := tmp[1]
+
+ switch tmp[0] {
+ case "tcp":
+ if _, err := url.Parse("tcp://" + tmp[1]); err != nil {
+ return nil, err
+ }
+ host = "http://" + tmp[1]
+ case "unix":
+ host = tmp[1]
+ // For local communication (unix domain sockets), the hostname is not used. Leave
+ // Host header empty because otherwise it would be rejected by net/http client-side
+ // sanitization, see https://go.dev/issue/60374.
+ hostHeader = "localhost"
+ }
+
+ transport := configureTransport(nil, tmp[0], host)
+ httpClient := &http.Client{Transport: transport}
+ clientTrans := runtime_client.NewWithClient(hostHeader, clientapi.DefaultBasePath,
+ clientapi.DefaultSchemes, httpClient)
+ return &Client{*clientapi.New(clientTrans, strfmt.Default)}, nil
+}
+
+// Hint tries to improve the error message displayed to the user.
+func Hint(err error) error {
+ if err == nil {
+ return err
+ }
+ e, _ := url.PathUnescape(err.Error())
+ if strings.Contains(err.Error(), defaults.SockPath) {
+ return fmt.Errorf("%s\nIs the agent running?", e)
+ }
+ return fmt.Errorf("%s", e)
+}
+
+func GetConnectivityStatusType(cs *models.ConnectivityStatus) ConnectivityStatusType {
+	// If the connectivity status is nil, it means that there was no
+	// successful probe, but also no failed probe with a concrete reason. In
+	// that case, the status is unknown and it usually means that the new
+	// node is still in the beginning of the bootstrapping process.
+ if cs == nil {
+ return ConnStatusUnknown
+ }
+ // Empty status means successful probe.
+ if cs.Status == "" {
+ return ConnStatusReachable
+ }
+ // Non-empty status means that there was an explicit reason of failure.
+ return ConnStatusUnreachable
+}
+
+func GetPathConnectivityStatusType(cp *models.PathStatus) ConnectivityStatusType {
+ if cp == nil {
+ return ConnStatusUnreachable
+ }
+ statuses := []*models.ConnectivityStatus{
+ cp.Icmp,
+ cp.HTTP,
+ }
+ // Initially assume healthy status.
+ status := ConnStatusReachable
+ for _, cs := range statuses {
+ switch GetConnectivityStatusType(cs) {
+ case ConnStatusUnreachable:
+ // If any status is unreachable, return it immediately.
+ return ConnStatusUnreachable
+ case ConnStatusUnknown:
+ // If the status is unknown, prepare to return it. It's
+ // going to be returned if there is no unreachable
+ // status in next iterations.
+ status = ConnStatusUnknown
+ }
+ }
+ return status
+}
+
+func SummarizePathConnectivityStatusType(cps []*models.PathStatus) ConnectivityStatusType {
+ status := ConnStatusReachable
+ for _, cp := range cps {
+ switch GetPathConnectivityStatusType(cp) {
+ case ConnStatusUnreachable:
+ // If any status is unreachable, return it immediately.
+ return ConnStatusUnreachable
+ case ConnStatusUnknown:
+ // If the status is unknown, prepare to return it. It's
+ // going to be returned if there is no unreachable
+ // status in next iterations.
+ status = ConnStatusUnknown
+ }
+ }
+ return status
+}
+
+func formatConnectivityStatus(w io.Writer, cs *models.ConnectivityStatus, path, indent string) {
+ status := cs.Status
+ switch GetConnectivityStatusType(cs) {
+ case ConnStatusReachable:
+ latency := time.Duration(cs.Latency)
+ status = fmt.Sprintf("OK, RTT=%s", latency)
+ }
+ fmt.Fprintf(w, "%s%s:\t%s\n", indent, path, status)
+}
+
+func formatPathStatus(w io.Writer, name string, cp *models.PathStatus, indent string, verbose bool) {
+ if cp == nil {
+ if verbose {
+ fmt.Fprintf(w, "%s%s connectivity:\tnil\n", indent, name)
+ }
+ return
+ }
+ fmt.Fprintf(w, "%s%s connectivity to %s:\n", indent, name, cp.IP)
+ indent = fmt.Sprintf("%s ", indent)
+
+ if cp.Icmp != nil {
+ formatConnectivityStatus(w, cp.Icmp, "ICMP to stack", indent)
+ }
+ if cp.HTTP != nil {
+ formatConnectivityStatus(w, cp.HTTP, "HTTP to agent", indent)
+ }
+}
+
+// allPathsAreHealthyOrUnknown checks whether ICMP and TCP(HTTP) connectivity
+// to the given paths is available or had no explicit error status
+// (which usually is the case when the new node is provisioned).
+func allPathsAreHealthyOrUnknown(cps []*models.PathStatus) bool {
+ for _, cp := range cps {
+ if cp == nil {
+ return false
+ }
+
+ statuses := []*models.ConnectivityStatus{
+ cp.Icmp,
+ cp.HTTP,
+ }
+ for _, status := range statuses {
+ switch GetConnectivityStatusType(status) {
+ case ConnStatusUnreachable:
+ return false
+ }
+ }
+ }
+ return true
+}
+
+func nodeIsHealthy(node *models.NodeStatus) bool {
+ return allPathsAreHealthyOrUnknown(GetAllHostAddresses(node)) &&
+ allPathsAreHealthyOrUnknown(GetAllEndpointAddresses(node))
+}
+
+func nodeIsLocalhost(node *models.NodeStatus, self *models.SelfStatus) bool {
+ return self != nil && node.Name == self.Name
+}
+
+func getPrimaryAddressIP(node *models.NodeStatus) string {
+ if node.Host == nil || node.Host.PrimaryAddress == nil {
+ return ipUnavailable
+ }
+
+ return node.Host.PrimaryAddress.IP
+}
+
+// GetHostPrimaryAddress returns the PrimaryAddress for the Host within node.
+// If node.Host is nil, returns nil.
+func GetHostPrimaryAddress(node *models.NodeStatus) *models.PathStatus {
+ if node.Host == nil {
+ return nil
+ }
+
+ return node.Host.PrimaryAddress
+}
+
+// GetHostSecondaryAddresses returns the secondary host addresses (if any)
+func GetHostSecondaryAddresses(node *models.NodeStatus) []*models.PathStatus {
+ if node.Host == nil {
+ return nil
+ }
+
+ return node.Host.SecondaryAddresses
+}
+
+// GetAllHostAddresses returns a list of all addresses (primary and any
+// secondary) for the host of a given node. If node.Host is nil,
+// returns nil.
+func GetAllHostAddresses(node *models.NodeStatus) []*models.PathStatus {
+ if node.Host == nil {
+ return nil
+ }
+
+ return append([]*models.PathStatus{node.Host.PrimaryAddress}, node.Host.SecondaryAddresses...)
+}
+
+// GetEndpointPrimaryAddress returns the PrimaryAddress for the health endpoint
+// within node. If node.HealthEndpoint is nil, returns nil.
+func GetEndpointPrimaryAddress(node *models.NodeStatus) *models.PathStatus {
+ if node.HealthEndpoint == nil {
+ return nil
+ }
+
+ return node.HealthEndpoint.PrimaryAddress
+}
+
+// GetEndpointSecondaryAddresses returns the secondary health endpoint addresses
+// (if any)
+func GetEndpointSecondaryAddresses(node *models.NodeStatus) []*models.PathStatus {
+ if node.HealthEndpoint == nil {
+ return nil
+ }
+
+ return node.HealthEndpoint.SecondaryAddresses
+}
+
+// GetAllEndpointAddresses returns a list of all addresses (primary and any
+// secondary) for the health endpoint within a given node.
+// If node.HealthEndpoint is nil, returns nil.
+func GetAllEndpointAddresses(node *models.NodeStatus) []*models.PathStatus {
+ if node.HealthEndpoint == nil {
+ return nil
+ }
+
+ return append([]*models.PathStatus{node.HealthEndpoint.PrimaryAddress}, node.HealthEndpoint.SecondaryAddresses...)
+}
+
+func formatNodeStatus(w io.Writer, node *models.NodeStatus, printAll, succinct, verbose, localhost bool) {
+ localStr := ""
+ if localhost {
+ localStr = " (localhost)"
+ }
+ if succinct {
+ if printAll || !nodeIsHealthy(node) {
+ ips := []string{getPrimaryAddressIP(node)}
+ for _, addr := range GetHostSecondaryAddresses(node) {
+ if addr == nil {
+ continue
+ }
+ ips = append(ips, addr.IP)
+ }
+ fmt.Fprintf(w, " %s%s\t%s\t%s\t%s\n", node.Name,
+ localStr, strings.Join(ips, ","),
+ SummarizePathConnectivityStatusType(GetAllHostAddresses(node)).String(),
+ SummarizePathConnectivityStatusType(GetAllEndpointAddresses(node)).String())
+ }
+ } else {
+ fmt.Fprintf(w, " %s%s:\n", node.Name, localStr)
+ formatPathStatus(w, "Host", GetHostPrimaryAddress(node), " ", verbose)
+ unhealthyPaths := !allPathsAreHealthyOrUnknown(GetHostSecondaryAddresses(node))
+ if (verbose || unhealthyPaths) && node.Host != nil {
+ for _, addr := range node.Host.SecondaryAddresses {
+ formatPathStatus(w, "Secondary", addr, " ", verbose)
+ }
+ }
+ formatPathStatus(w, "Endpoint", GetEndpointPrimaryAddress(node), " ", verbose)
+ unhealthyPaths = !allPathsAreHealthyOrUnknown(GetEndpointSecondaryAddresses(node))
+ if (verbose || unhealthyPaths) && node.HealthEndpoint != nil {
+ for _, addr := range node.HealthEndpoint.SecondaryAddresses {
+ formatPathStatus(w, "Secondary", addr, " ", verbose)
+ }
+ }
+ }
+}
+
+// FormatHealthStatusResponse writes a HealthStatusResponse as a string to the
+// writer.
+//
+// 'printAll', if true, causes all nodes to be printed regardless of status
+// 'succinct', if true, causes node health to be output as one line per node
+// 'verbose', if true, overrides 'succinct' and prints all information
+// 'maxLines', if nonzero, determines the maximum number of lines to print
+func FormatHealthStatusResponse(w io.Writer, sr *models.HealthStatusResponse, printAll, succinct, verbose bool, maxLines int) {
+ var (
+ healthy int
+ localhost *models.NodeStatus
+ )
+ for _, node := range sr.Nodes {
+ if nodeIsHealthy(node) {
+ healthy++
+ }
+ if nodeIsLocalhost(node, sr.Local) {
+ localhost = node
+ }
+ }
+ if succinct {
+ fmt.Fprintf(w, "Cluster health:\t%d/%d reachable\t(%s)\n",
+ healthy, len(sr.Nodes), sr.Timestamp)
+ if printAll || healthy < len(sr.Nodes) {
+ fmt.Fprintf(w, " Name\tIP\tNode\tEndpoints\n")
+ }
+ } else {
+ fmt.Fprintf(w, "Probe time:\t%s\n", sr.Timestamp)
+ fmt.Fprintf(w, "Nodes:\n")
+ }
+
+ if localhost != nil {
+ formatNodeStatus(w, localhost, printAll, succinct, verbose, true)
+ maxLines--
+ }
+
+ nodes := sr.Nodes
+ sort.Slice(nodes, func(i, j int) bool {
+ return strings.Compare(nodes[i].Name, nodes[j].Name) < 0
+ })
+ for n, node := range nodes {
+ if maxLines > 0 && n > maxLines {
+ break
+ }
+ if node == localhost {
+ continue
+ }
+ formatNodeStatus(w, node, printAll, succinct, verbose, false)
+ }
+ if maxLines > 0 && len(sr.Nodes)-healthy > maxLines {
+ fmt.Fprintf(w, " ...")
+ }
+}
+
+// GetAndFormatHealthStatus fetches the health status from the cilium-health
+// daemon via the default channel and formats its output as a string to the
+// writer.
+//
+// 'succinct', 'verbose' and 'maxLines' are handled the same as in
+// FormatHealthStatusResponse().
+func GetAndFormatHealthStatus(w io.Writer, succinct, verbose bool, maxLines int) {
+ client, err := NewClient("")
+ if err != nil {
+ fmt.Fprintf(w, "Cluster health:\t\t\tClient error: %s\n", err)
+ return
+ }
+ hr, err := client.Connectivity.GetStatus(nil)
+ if err != nil {
+ // The regular `cilium status` output will print the reason why.
+ fmt.Fprintf(w, "Cluster health:\t\t\tWarning\tcilium-health daemon unreachable\n")
+ return
+ }
+ FormatHealthStatusResponse(w, hr.Payload, verbose, succinct, verbose, maxLines)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/health/client/modules.go b/vendor/github.com/cilium/cilium/pkg/health/client/modules.go
new file mode 100644
index 000000000..4a3477c08
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/health/client/modules.go
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package client
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/cilium/cilium/api/v1/client/daemon"
+ "github.com/cilium/cilium/pkg/hive/cell"
+)
+
+// ModulesHealth represent hive modules health API.
+type ModulesHealth interface {
+ // GetHealth retrieves agent modules health.
+ GetHealth(params *daemon.GetHealthParams, opts ...daemon.ClientOption) (*daemon.GetHealthOK, error)
+}
+
+// GetAndFormatModulesHealth retrieves modules health and formats output.
+func GetAndFormatModulesHealth(w io.Writer, clt ModulesHealth, verbose bool) {
+ fmt.Fprintf(w, "Modules Health:")
+ resp, err := clt.GetHealth(daemon.NewGetHealthParams())
+ if err != nil {
+ fmt.Fprintf(w, "\t%s\n", err)
+ return
+ }
+ if resp.Payload == nil {
+ fmt.Fprintf(w, "\tno health payload detected\n")
+ return
+ }
+ if verbose {
+ fmt.Fprintf(w, "\n Module\tStatus\tMessage\tLast Updated\n")
+ for _, m := range resp.Payload.Modules {
+ fmt.Fprintf(w, " %s\t%s\t%s\t%12s\n", m.ModuleID, m.Level, m.Message, m.LastUpdated)
+ }
+ return
+ }
+ tally := make(map[cell.Level]int, 4)
+ for _, m := range resp.Payload.Modules {
+ tally[cell.Level(m.Level)] += 1
+ }
+ fmt.Fprintf(w, "\t%s(%d) %s(%d) %s(%d) %s(%d)\n",
+ cell.StatusStopped,
+ tally[cell.StatusStopped],
+ cell.StatusDegraded,
+ tally[cell.StatusDegraded],
+ cell.StatusOK,
+ tally[cell.StatusOK],
+ cell.StatusUnknown,
+ tally[cell.StatusUnknown],
+ )
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/health/defaults/defaults.go b/vendor/github.com/cilium/cilium/pkg/health/defaults/defaults.go
new file mode 100644
index 000000000..d84cecd2c
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/health/defaults/defaults.go
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package defaults
+
+import (
+ daemon "github.com/cilium/cilium/pkg/defaults"
+)
+
+const (
+ // SockPath is the path to the UNIX domain socket exposing the API to clients locally
+ SockPath = daemon.RuntimePath + "/health.sock"
+
+ // SockPathEnv is the environment variable to overwrite SockPath
+ SockPathEnv = "CILIUM_HEALTH_SOCK"
+
+ // HTTPPathPort is used for probing base HTTP path connectivity
+ HTTPPathPort = daemon.ClusterHealthPort
+
+ // HealthEPName is the name used for the health endpoint, which is also
+ // used by the CLI client to detect when connectivity health is enabled
+ HealthEPName = "cilium-health-ep"
+)
diff --git a/vendor/github.com/cilium/cilium/pkg/hive/cell/cell.go b/vendor/github.com/cilium/cilium/pkg/hive/cell/cell.go
new file mode 100644
index 000000000..0529919e7
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/hive/cell/cell.go
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package cell
+
+import (
+ "go.uber.org/dig"
+
+ "github.com/cilium/cilium/pkg/logging"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+)
+
+var (
+ log = logging.DefaultLogger.WithField(logfields.LogSubsys, "hive")
+)
+
+// Cell is the modular building block of the hive.
+//
+// A cell can be constructed with:
+//
+// - Module(): Create a named set of cells.
+// - Provide(): Provide object constructors.
+// - Invoke(): Invoke a function to instantiate objects.
+// - Decorate(): Decorate a set of cells to augment an object.
+// - Config(): Cell providing a configuration struct.
+type Cell interface {
+ // Info provides a structural summary of the cell for printing purposes.
+ Info(container) Info
+
+ // Apply the cell to the dependency graph container.
+ Apply(container) error
+}
+
+// In when embedded into a struct used as constructor parameter makes the exported
+// values of that struct become dependency injected values. In other words, it allows
+// moving a long list of constructor parameters into a struct.
+//
+// Struct fields can be annotated with `optional:"true"` to make the dependency optional.
+// If the type is not found in the dependency graph, the value is set to the zero value.
+//
+// See https://pkg.go.dev/go.uber.org/dig#In for more information.
+type In = dig.In
+
+// Out when embedded into a struct that is returned by a constructor will make the
+// values in the struct become objects in the dependency graph instead of the struct
+// itself.
+//
+// See https://pkg.go.dev/go.uber.org/dig#Out for more information.
+type Out = dig.Out
+
+// container is the common interface between dig.Container and dig.Scope.
+// Used in Apply().
+type container interface {
+ Provide(ctor any, opts ...dig.ProvideOption) error
+ Invoke(fn any, opts ...dig.InvokeOption) error
+ Decorate(fn any, opts ...dig.DecorateOption) error
+ Scope(name string, opts ...dig.ScopeOption) *dig.Scope
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/hive/cell/config.go b/vendor/github.com/cilium/cilium/pkg/hive/cell/config.go
new file mode 100644
index 000000000..860e03be3
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/hive/cell/config.go
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package cell
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/mitchellh/mapstructure"
+ "github.com/spf13/pflag"
+ "go.uber.org/dig"
+
+ "github.com/cilium/cilium/pkg/command"
+)
+
+// Config constructs a new config cell.
+//
+// The configuration struct `T` needs to implement the Flags method that
+// registers the flags. The structure is populated and provided via dependency
+// injection by Hive.Run(). The underlying mechanism for populating the struct
+// is viper's Unmarshal().
+func Config[Cfg Flagger](def Cfg) Cell {
+ c := &config[Cfg]{defaultConfig: def, flags: pflag.NewFlagSet("", pflag.ContinueOnError)}
+ def.Flags(c.flags)
+ return c
+}
+
+// Flagger is implemented by configuration structs to provide configuration
+// for a cell.
+type Flagger interface {
+ // Flags registers the configuration options as command-line flags.
+ //
+ // By convention a flag name matches the field name
+ // if they're the same under case-insensitive comparison when dashes are
+ // removed. E.g. "my-config-flag" matches field "MyConfigFlag". The
+ // correspondence to the flag can be also specified with the mapstructure
+ // tag: MyConfigFlag `mapstructure:"my-config-flag"`.
+ //
+ // Exported fields that are not found from the viper settings will cause
+ // hive.Run() to fail. Unexported fields are ignored.
+ //
+ // See https://pkg.go.dev/github.com/mitchellh/mapstructure for more info.
+ Flags(*pflag.FlagSet)
+}
+
+// config is a cell for configuration. It registers the config's command-line
+// flags and provides the parsed config to the hive.
+type config[Cfg Flagger] struct {
+ defaultConfig Cfg
+ flags *pflag.FlagSet
+}
+
+type AllSettings map[string]any
+
+type configParams[Cfg Flagger] struct {
+ dig.In
+ AllSettings AllSettings
+ Override func(*Cfg) `optional:"true"`
+}
+
+func (c *config[Cfg]) provideConfig(p configParams[Cfg]) (Cfg, error) {
+ settings := p.AllSettings
+ target := c.defaultConfig
+ decoder, err := mapstructure.NewDecoder(decoderConfig(&target))
+ if err != nil {
+ return target, fmt.Errorf("failed to create config decoder: %w", err)
+ }
+
+ // As input, only consider the declared flags.
+ input := make(map[string]any)
+
+ c.flags.VisitAll(func(f *pflag.Flag) {
+ if v, ok := settings[f.Name]; ok {
+ input[f.Name] = v
+ } else {
+ err = fmt.Errorf("internal error: %s not found from settings", f.Name)
+ }
+ })
+ if err != nil {
+ return target, err
+ }
+ if err := decoder.Decode(input); err != nil {
+ return target, fmt.Errorf("failed to unmarshal config struct %T: %w.\n"+
+ "Hint: field 'FooBar' matches flag 'foo-bar', or use tag `mapstructure:\"flag-name\"` to match field with flag",
+ target, err)
+ }
+
+ // See if the configuration was overridden with ConfigOverride. We check the override
+ // after the decode to validate that the config struct is properly formed and all
+ // flags are registered.
+ if p.Override != nil {
+ p.Override(&target)
+ }
+
+ return target, nil
+}
+
+func decoderConfig(target any) *mapstructure.DecoderConfig {
+ return &mapstructure.DecoderConfig{
+ Metadata: nil,
+ Result: target,
+ WeaklyTypedInput: true,
+ DecodeHook: mapstructure.ComposeDecodeHookFunc(
+ mapstructure.StringToTimeDurationHookFunc(),
+ mapstructure.StringToSliceHookFunc(","),
+ stringToMapHookFunc(),
+ ),
+ ZeroFields: true,
+ // Error out if the config struct has fields that are
+ // not found from input.
+ ErrorUnset: true,
+ // Error out also if settings from input are not used.
+ ErrorUnused: true,
+ // Match field FooBarBaz with "foo-bar-baz" by removing
+ // the dashes from the flag.
+ MatchName: func(mapKey, fieldName string) bool {
+ return strings.EqualFold(
+ strings.ReplaceAll(mapKey, "-", ""),
+ fieldName)
+ },
+ }
+}
+
+func (c *config[Cfg]) Apply(cont container) error {
+ // Register the flags to the global set of all flags.
+ err := cont.Invoke(
+ func(allFlags *pflag.FlagSet) {
+ allFlags.AddFlagSet(c.flags)
+ })
+ if err != nil {
+ return err
+ }
+ // And provide the constructor for the config.
+ return cont.Provide(c.provideConfig, dig.Export(true))
+}
+
+func (c *config[Cfg]) Info(cont container) (info Info) {
+ cont.Invoke(func(cfg Cfg) {
+ info = &InfoStruct{cfg}
+ })
+ return
+}
+
+// stringToMapHookFunc returns a DecodeHookFunc that converts string
+// to map[string]string supporting both json and KV formats.
+func stringToMapHookFunc() mapstructure.DecodeHookFunc {
+ return func(from reflect.Kind, to reflect.Kind, data interface{}) (interface{}, error) {
+ if from != reflect.String || to != reflect.Map {
+ return data, nil
+ }
+
+ return command.ToStringMapStringE(data.(string))
+ }
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/hive/cell/decorator.go b/vendor/github.com/cilium/cilium/pkg/hive/cell/decorator.go
new file mode 100644
index 000000000..fcba55aa2
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/hive/cell/decorator.go
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package cell
+
+import (
+ "fmt"
+
+ "github.com/cilium/cilium/pkg/hive/internal"
+)
+
+// Decorate takes a decorator function and a set of cells and returns
+// a decorator cell.
+//
+// A decorator function is a function that takes as arguments objects
+// in the hive and returns one or more augmented objects. The cells wrapped
+// with a decorator will be provided the returned augmented objects.
+//
+// Example:
+//
+// cell.Decorate(
+// func(e Example) Example {
+// return e.WithMoreMagic()
+// },
+// cell.Invoke(func(e Example) {
+// // e now has more magic
+// },
+// )
+func Decorate(dtor any, cells ...Cell) Cell {
+ return &decorator{
+ decorator: dtor,
+ cells: cells,
+ }
+}
+
+type decorator struct {
+ decorator any
+ cells []Cell
+}
+
+func (d *decorator) Apply(c container) error {
+ scope := c.Scope(fmt.Sprintf("(decorate %s)", internal.PrettyType(d.decorator)))
+ if err := scope.Decorate(d.decorator); err != nil {
+ return err
+ }
+
+ for _, cell := range d.cells {
+ if err := cell.Apply(scope); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (d *decorator) Info(c container) Info {
+ n := NewInfoNode(fmt.Sprintf("🔀 %s: %s", internal.FuncNameAndLocation(d.decorator), internal.PrettyType(d.decorator)))
+ for _, cell := range d.cells {
+ n.Add(cell.Info(c))
+ }
+ return n
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/hive/cell/group.go b/vendor/github.com/cilium/cilium/pkg/hive/cell/group.go
new file mode 100644
index 000000000..f2e60b12c
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/hive/cell/group.go
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package cell
+
+type group []Cell
+
+// Group a set of cells. Unlike Module(), Group() does not create a new
+// scope.
+func Group(cells ...Cell) Cell {
+ return group(cells)
+}
+
+func (g group) Apply(c container) error {
+ for _, cell := range g {
+ if err := cell.Apply(c); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (g group) Info(c container) Info {
+ n := NewInfoNode("")
+ for _, cell := range g {
+ n.Add(cell.Info(c))
+ }
+ return n
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/hive/cell/health.go b/vendor/github.com/cilium/cilium/pkg/hive/cell/health.go
new file mode 100644
index 000000000..eb6f644e8
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/hive/cell/health.go
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package cell
+
+import (
+ "context"
+ "fmt"
+ "sort"
+ "sync/atomic"
+ "time"
+
+ "github.com/cilium/cilium/pkg/lock"
+
+ "golang.org/x/exp/maps"
+)
+
+// Level denotes what kind an update is.
+type Level string
+
+const (
+ // StatusUnknown is the default status of a Module, prior to it reporting
+ // any status.
+	// All newly created modules start at this level.
+ StatusUnknown Level = "Unknown"
+
+ // StatusStopped is the status of a Module that has completed, further updates
+ // will not be processed.
+ StatusStopped Level = "Stopped"
+
+ // StatusDegraded is the status of a Module that has entered a degraded state.
+ StatusDegraded Level = "Degraded"
+
+ // StatusOK is the status of a Module that has achieved a desired state.
+ StatusOK Level = "OK"
+)
+
+// HealthReporter provides a method of declaring a Modules health status.
+type HealthReporter interface {
+ // OK declares that a Module has achieved a desired state and has not entered
+ // any unexpected or incorrect states.
+ // Modules should only declare themselves as 'OK' once they have stabilized,
+ // rather than during their initial state. This should be left to be reported
+ // as the default "unknown" to denote that the module has not reached a "ready"
+ // health state.
+ OK(status string)
+
+ // Stopped reports that a module has completed, and will no longer report any
+ // health status.
+ Stopped(reason string)
+
+ // Degraded declares that a module has entered a degraded state.
+	// This means that it may have failed to provide its intended services, or
+	// to perform its desired task.
+ Degraded(reason string, err error)
+}
+
+// Health provides exported functions for accessing health status data.
+// As well, provides unexported functions for use during module apply.
+type Health interface {
+ // All returns a copy of all module statuses.
+ // This includes unknown status for modules that have not reported a status yet.
+ All() []Status
+
+ // Get returns a copy of a modules status, by module ID.
+ // This includes unknown status for modules that have not reported a status yet.
+ Get(string) *Status
+
+ // Stop stops the health provider from processing updates.
+ Stop(context.Context) error
+
+ // forModule creates a moduleID scoped reporter handle.
+ forModule(string) HealthReporter
+
+ // processed returns the number of updates processed.
+ processed() uint64
+}
+
+// Update is an event that denotes the change of a modules health state.
+type Update struct {
+ Level
+ ModuleID string
+ Message string
+ Err error
+}
+
+// Status is a modules last health state, including the last update.
+type Status struct {
+ // Update is the last reported update for a module.
+ Update
+	// Stopped is true when a module has completed, thus it contains
+	// its last reported status. New updates will not be processed.
+ Stopped bool
+ // Final is the stopped message, if the module has been stopped.
+ Final string
+ // LastOK is the time of the last OK status update.
+ LastOK time.Time
+ // LastUpdated is the time of the last status update.
+ LastUpdated time.Time
+}
+
+// String returns a string representation of a Status, implements fmt.Stringer.
+func (s *Status) String() string {
+ var sinceLast string
+ if s.LastUpdated.IsZero() {
+ sinceLast = "never"
+ } else {
+ sinceLast = time.Since(s.LastUpdated).String() + " ago"
+ }
+ return fmt.Sprintf("Status{ModuleID: %s, Level: %s, Since: %s, Message: %s, Err: %v}",
+ s.ModuleID, s.Level, sinceLast, s.Message, s.Err)
+}
+
+// NewHealthProvider starts and returns a health status which processes
+// health status updates.
+func NewHealthProvider() Health {
+ p := &healthProvider{
+ moduleStatuses: make(map[string]Status),
+ running: true,
+ }
+ return p
+}
+
+func (p *healthProvider) processed() uint64 {
+ return p.numProcessed.Load()
+}
+
+func (p *healthProvider) process(u Update) {
+ prev := func() Status {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ t := time.Now()
+ prev := p.moduleStatuses[u.ModuleID]
+
+ if !p.running {
+ return prev
+ }
+
+ ns := Status{
+ Update: u,
+ LastUpdated: t,
+ }
+ switch u.Level {
+ case StatusOK:
+ ns.LastOK = t
+ case StatusStopped:
+ // If Stopped, set that module was stopped and preserve last known status.
+ ns = prev
+ ns.Stopped = true
+ ns.Final = u.Message
+ }
+ p.moduleStatuses[u.ModuleID] = ns
+ log.WithField("status", ns.String()).Debug("Processed new health status")
+ return prev
+ }()
+ p.numProcessed.Add(1)
+ if prev.Stopped {
+ log.Warnf("module %q reported health status after being Stopped", u.ModuleID)
+ }
+}
+
+// Stop stops the status provider, and waits for all updates to be processed or
+// returns an error if the context is cancelled first.
+func (p *healthProvider) Stop(ctx context.Context) error {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ p.running = false // following this, no new reporters will send.
+ return nil
+}
+
+// forModule returns a module scoped status reporter handle for emitting status updates.
+// This is used to automatically provide declared modules with a status reporter.
+func (p *healthProvider) forModule(moduleID string) HealthReporter {
+ p.mu.Lock()
+ p.moduleStatuses[moduleID] = Status{Update: Update{
+ ModuleID: moduleID,
+ Level: StatusUnknown,
+ Message: "No status reported yet"},
+ }
+ p.mu.Unlock()
+
+ return &reporter{
+ moduleID: moduleID,
+ process: p.process,
+ }
+}
+
+// All returns a copy of all the latest statuses.
+func (p *healthProvider) All() []Status {
+ p.mu.RLock()
+ all := maps.Values(p.moduleStatuses)
+ p.mu.RUnlock()
+ sort.Slice(all, func(i, j int) bool {
+ return all[i].ModuleID < all[j].ModuleID
+ })
+ return all
+}
+
+// Get returns the latest status for a module, by module ID.
+func (p *healthProvider) Get(moduleID string) *Status {
+ p.mu.RLock()
+ defer p.mu.RUnlock()
+ s, ok := p.moduleStatuses[moduleID]
+ if ok {
+ return &s
+ }
+ return nil
+}
+
+type healthProvider struct {
+ mu lock.RWMutex
+
+ running bool
+ numProcessed atomic.Uint64
+
+ moduleStatuses map[string]Status
+}
+
+// reporter is a handle for emitting status updates.
+type reporter struct {
+ moduleID string
+ process func(Update)
+}
+
+// Degraded reports a degraded status update, should be used when a module encounters a
+// a state that is not fully reconciled.
+func (r *reporter) Degraded(reason string, err error) {
+ r.process(Update{ModuleID: r.moduleID, Level: StatusDegraded, Message: reason, Err: err})
+}
+
+// Stopped reports that a module has stopped, further updates will not be processed.
+func (r *reporter) Stopped(reason string) {
+ r.process(Update{ModuleID: r.moduleID, Level: StatusStopped, Message: reason})
+}
+
+// OK reports that a module is in a healthy state.
+func (r *reporter) OK(status string) {
+ r.process(Update{ModuleID: r.moduleID, Level: StatusOK, Message: status})
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/hive/cell/info.go b/vendor/github.com/cilium/cilium/pkg/hive/cell/info.go
new file mode 100644
index 000000000..7c2075328
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/hive/cell/info.go
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package cell
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "github.com/davecgh/go-spew/spew"
+ "golang.org/x/term"
+)
+
+const (
+ // indentBy is the number of spaces nested elements should be indented by
+ indentBy = 4
+)
+
+type InfoPrinter struct {
+ io.Writer
+ width int
+}
+
+func NewInfoPrinter() *InfoPrinter {
+ width, _, err := term.GetSize(int(os.Stdout.Fd()))
+ if err != nil {
+ width = 120
+ }
+ return &InfoPrinter{
+ Writer: os.Stdout,
+ width: width,
+ }
+}
+
+// Info provides a simple way of printing cells hierarchically in
+// textual form.
+type Info interface {
+ Print(indent int, w *InfoPrinter)
+}
+
+type InfoLeaf string
+
+func (l InfoLeaf) Print(indent int, w *InfoPrinter) {
+ buf := bufio.NewWriter(w)
+ indentString := strings.Repeat(" ", indent)
+ buf.WriteString(indentString)
+ currentLineLength := len(indentString)
+ wrapped := false
+ for _, f := range strings.Fields(string(l)) {
+ newLineLength := currentLineLength + len(f) + 1
+ if newLineLength >= w.width {
+ buf.WriteByte('\n')
+ if !wrapped {
+ // Increase the indent for the wrapped lines so it's clear we
+ // wrapped.
+ wrapped = true
+ indent += 2
+ indentString = strings.Repeat(" ", indent)
+ }
+ buf.WriteString(indentString)
+ currentLineLength = indent + len(f) + 1
+ } else {
+ currentLineLength = newLineLength
+ }
+ buf.WriteString(f)
+ buf.WriteByte(' ')
+ }
+ buf.WriteByte('\n')
+ buf.Flush()
+}
+
+type InfoNode struct {
+ // Header line. If missing, no header printed and children
+ // not indented.
+ header string
+ condensed bool
+
+ children []Info
+}
+
+func NewInfoNode(header string) *InfoNode {
+ return &InfoNode{header: header}
+}
+
+func (n *InfoNode) Add(child Info) {
+ n.children = append(n.children, child)
+}
+
+func (n *InfoNode) AddLeaf(format string, args ...any) {
+ n.Add(InfoLeaf(fmt.Sprintf(format, args...)))
+}
+
+func (n *InfoNode) Print(indent int, w *InfoPrinter) {
+ if n.header != "" {
+ fmt.Fprintf(w, "%s%s:\n", strings.Repeat(" ", indent), n.header)
+ indent += indentBy
+ }
+
+ for i, child := range n.children {
+ child.Print(indent, w)
+ if !n.condensed && i != len(n.children)-1 {
+ w.Write([]byte{'\n'})
+ }
+ }
+}
+
+type InfoStruct struct {
+ value any
+}
+
+func (n *InfoStruct) Print(indent int, w *InfoPrinter) {
+ scs := spew.ConfigState{Indent: strings.Repeat(" ", indentBy), SortKeys: true}
+ indentString := strings.Repeat(" ", indent)
+ for i, line := range strings.Split(scs.Sdump(n.value), "\n") {
+ if i == 0 {
+ fmt.Fprintf(w, "%s⚙️ %s\n", indentString, line)
+ } else {
+ fmt.Fprintf(w, "%s%s\n", indentString, line)
+ }
+ }
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/hive/cell/invoke.go b/vendor/github.com/cilium/cilium/pkg/hive/cell/invoke.go
new file mode 100644
index 000000000..31beeea15
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/hive/cell/invoke.go
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package cell
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+ "time"
+
+ "go.uber.org/dig"
+
+ "github.com/cilium/cilium/pkg/hive/internal"
+)
+
+type invoker struct {
+ funcs []namedFunc
+}
+
+type namedFunc struct {
+ name string
+ fn any
+ info dig.InvokeInfo
+}
+
+type InvokerList interface {
+ AppendInvoke(func() error)
+}
+
+func (inv *invoker) invoke(cont container) error {
+ for i, afn := range inv.funcs {
+ log.WithField("function", afn.name).Debug("Invoking")
+ t0 := time.Now()
+ if err := cont.Invoke(afn.fn, dig.FillInvokeInfo(&inv.funcs[i].info)); err != nil {
+ log.WithError(err).WithField("", afn.name).Error("Invoke failed")
+ return err
+ }
+ d := time.Since(t0)
+ log.WithField("duration", d).WithField("function", afn.name).Info("Invoked")
+ }
+ return nil
+}
+
+func (i *invoker) Apply(c container) error {
+ // Remember the scope in which we need to invoke.
+ invoker := func() error { return i.invoke(c) }
+
+ // Append the invoker to the list of invoke functions. These are invoked
+ // prior to start to build up the objects. They are not invoked directly
+ // here as first the configuration flags need to be registered. This allows
+ // using hives in a command-line application with many commands and where
+ // we don't yet know which command to run, but we still need to register
+ // all the flags.
+ return c.Invoke(func(l InvokerList) {
+ l.AppendInvoke(invoker)
+ })
+}
+
+func (i *invoker) Info(container) Info {
+ n := NewInfoNode("")
+ for _, namedFunc := range i.funcs {
+ invNode := NewInfoNode(fmt.Sprintf("🛠️ %s", namedFunc.name))
+ invNode.condensed = true
+
+ var ins []string
+ for _, input := range namedFunc.info.Inputs {
+ ins = append(ins, internal.TrimName(input.String()))
+ }
+ sort.Strings(ins)
+ invNode.AddLeaf("⇨ %s", strings.Join(ins, ", "))
+ n.Add(invNode)
+ }
+ return n
+}
+
+// Invoke constructs a cell for invoke functions. The invoke functions are executed
+// when the hive is started to instantiate all objects via the constructors.
+func Invoke(funcs ...any) Cell {
+ namedFuncs := []namedFunc{}
+ for _, fn := range funcs {
+ namedFuncs = append(
+ namedFuncs,
+ namedFunc{name: internal.FuncNameAndLocation(fn), fn: fn})
+ }
+ return &invoker{funcs: namedFuncs}
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/hive/cell/metric.go b/vendor/github.com/cilium/cilium/pkg/hive/cell/metric.go
new file mode 100644
index 000000000..76b9cdbf4
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/hive/cell/metric.go
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package cell
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "go.uber.org/dig"
+
+ "github.com/cilium/cilium/pkg/hive/internal"
+ pkgmetric "github.com/cilium/cilium/pkg/metrics/metric"
+)
+
+var (
+ withMeta pkgmetric.WithMetadata
+ collector prometheus.Collector
+)
+
+// Metric constructs a new metric cell.
+//
+// This cell type provides `S` to the hive as returned by `ctor`, it also makes each individual field
+// value available via the `hive-metrics` value group. Infrastructure components such as a registry,
+// inspection tool, or documentation generator can collect all metrics in the hive via this value group.
+//
+// The `ctor` constructor must return a struct or pointer to a struct of type `S`. The returned struct
+// must only contain public fields. All field types should implement the
+// `github.com/cilium/cilium/pkg/metrics/metric.WithMetadata`
+// and `github.com/prometheus/client_golang/prometheus.Collector` interfaces.
+func Metric[S any](ctor func() S) Cell {
+ var nilOut S
+ outTyp := reflect.TypeOf(nilOut)
+ if outTyp.Kind() == reflect.Ptr {
+ outTyp = outTyp.Elem()
+ }
+
+ if outTyp.Kind() != reflect.Struct {
+ panic(fmt.Errorf(
+ "cell.Metric must be invoked with a constructor function that returns a struct or pointer to a struct, "+
+ "a constructor which returns a %s was supplied",
+ outTyp.Kind(),
+ ))
+ }
+
+ // Let's be strict for now, could lift this in the future if we ever need to
+ if outTyp.NumField() == 0 {
+ panic(fmt.Errorf(
+ "cell.Metric must be invoked with a constructor function that returns exactly a struct with at least 1 " +
+ "metric, a constructor which returns a struct with zero fields was supplied",
+ ))
+ }
+
+ withMetaTyp := reflect.TypeOf(&withMeta).Elem()
+ collectorTyp := reflect.TypeOf(&collector).Elem()
+ for i := 0; i < outTyp.NumField(); i++ {
+ field := outTyp.Field(i)
+ if !field.IsExported() {
+ panic(fmt.Errorf(
+ "The struct returned by the constructor passed to cell.Metric has a private field '%s', which "+
+ "is not allowed. All fields on the returning struct must be exported",
+ field.Name,
+ ))
+ }
+
+ if !field.Type.Implements(withMetaTyp) {
+ panic(fmt.Errorf(
+ "The struct returned by the constructor passed to cell.Metric has a field '%s', which is not metric.WithMetadata.",
+ field.Name,
+ ))
+ }
+
+ if !field.Type.Implements(collectorTyp) {
+ panic(fmt.Errorf(
+ "The struct returned by the constructor passed to cell.Metric has a field '%s', which is not prometheus.Collector.",
+ field.Name,
+ ))
+ }
+ }
+
+ return &metric[S]{
+ ctor: ctor,
+ }
+}
+
+type metric[S any] struct {
+ ctor func() S
+}
+
+type metricOut struct {
+ dig.Out
+
+ Metrics []pkgmetric.WithMetadata `group:"hive-metrics,flatten"`
+}
+
+func (m *metric[S]) provideMetrics(metricSet S) metricOut {
+ var metrics []pkgmetric.WithMetadata
+
+ value := reflect.ValueOf(metricSet)
+ typ := value.Type()
+ if typ.Kind() == reflect.Pointer {
+ value = value.Elem()
+ typ = typ.Elem()
+ }
+
+ if typ.Kind() != reflect.Struct {
+ return metricOut{}
+ }
+
+ for i := 0; i < typ.NumField(); i++ {
+ if withMeta, ok := value.Field(i).Interface().(pkgmetric.WithMetadata); ok {
+ metrics = append(metrics, withMeta)
+ }
+ }
+
+ return metricOut{
+ Metrics: metrics,
+ }
+}
+
+func (m *metric[S]) Info(container) Info {
+ n := NewInfoNode(fmt.Sprintf("📈 %s", internal.FuncNameAndLocation(m.ctor)))
+ n.condensed = true
+
+ return n
+}
+
+func (m *metric[S]) Apply(container container) error {
+ // Provide the supplied constructor, so its return type is directly accessible by cells
+ container.Provide(m.ctor, dig.Export(true))
+
+ // Provide the metrics provider, which will take the return value of the constructor and turn it into a
+ // slice of metrics to be consumed by anyone interested in handling them.
+ container.Provide(m.provideMetrics, dig.Export(true))
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/hive/cell/module.go b/vendor/github.com/cilium/cilium/pkg/hive/cell/module.go
new file mode 100644
index 000000000..74fa5f98e
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/hive/cell/module.go
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package cell
+
+import (
+ "fmt"
+ "regexp"
+
+ "github.com/sirupsen/logrus"
+ "go.uber.org/dig"
+
+ "github.com/cilium/cilium/pkg/logging/logfields"
+)
+
+// Module creates a scoped set of cells with a given identifier.
+//
+// The id and title will be included in the object dump (hive.PrintObjects).
+// The id must start with a lower-case letter, be at most 31 characters and only contain [a-z0-9-_].
+// Title can contain [a-zA-Z0-9_- ] and must be at most 80 characters.
+//
+// Private constructors within a module (ProvidePrivate) are only accessible
+// within this module and its sub-modules.
+func Module(id, title string, cells ...Cell) Cell {
+ validateIDAndTitle(id, title)
+ return &module{id, title, cells}
+}
+
+var (
+ idRegex = regexp.MustCompile(`^[a-z][a-z0-9_\-]{1,30}$`)
+ titleRegex = regexp.MustCompile(`^[a-zA-Z0-9_\- ]{1,80}$`)
+)
+
+func validateIDAndTitle(id, title string) {
+ if !idRegex.MatchString(id) {
+ panic(fmt.Sprintf("Invalid hive.Module id: %q, expected to id match %s", id, idRegex))
+ }
+ if !titleRegex.MatchString(title) {
+ panic(fmt.Sprintf("Invalid hive.Module title: %q, expected to title match %s", title, titleRegex))
+ }
+}
+
+type module struct {
+ // id is the module identity. It is shown in object output and is used to derive
+ // the scoped logger.
+ id string
+
+ // title is a human-readable short title for the module. Shown in object output
+ // alongside the identifier.
+ title string
+
+ cells []Cell
+}
+
+func (m *module) logger(log logrus.FieldLogger) logrus.FieldLogger {
+ return log.WithField(logfields.LogSubsys, m.id)
+}
+
+func (m *module) moduleScopedStatusReporter(p Health) HealthReporter {
+ return p.forModule(m.id)
+}
+
+func (m *module) Apply(c container) error {
+ scope := c.Scope(m.id)
+
+ // Provide module scoped status reporter, used for reporting module level
+ // health status.
+ if err := scope.Provide(m.moduleScopedStatusReporter, dig.Export(false)); err != nil {
+ return err
+ }
+
+ if err := scope.Decorate(m.logger); err != nil {
+ return err
+ }
+
+ for _, cell := range m.cells {
+ if err := cell.Apply(scope); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (m *module) Info(c container) Info {
+ n := NewInfoNode("Ⓜ️ " + m.id + " (" + m.title + ")")
+ for _, cell := range m.cells {
+ n.Add(cell.Info(c))
+ }
+ return n
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/hive/cell/provide.go b/vendor/github.com/cilium/cilium/pkg/hive/cell/provide.go
new file mode 100644
index 000000000..6b21cc3c0
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/hive/cell/provide.go
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package cell
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ "go.uber.org/dig"
+
+ "github.com/cilium/cilium/pkg/hive/internal"
+)
+
+// provider is a set of constructors
+type provider struct {
+ ctors []any
+ infos []dig.ProvideInfo
+ export bool
+}
+
+func (p *provider) Apply(c container) error {
+ p.infos = make([]dig.ProvideInfo, len(p.ctors))
+ for i, ctor := range p.ctors {
+ if err := c.Provide(ctor, dig.Export(p.export), dig.FillProvideInfo(&p.infos[i])); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (p *provider) Info(container) Info {
+ n := &InfoNode{}
+ for i, ctor := range p.ctors {
+ info := p.infos[i]
+ privateSymbol := ""
+ if !p.export {
+ privateSymbol = "🔒️"
+ }
+
+ ctorNode := NewInfoNode(fmt.Sprintf("🚧%s %s", privateSymbol, internal.FuncNameAndLocation(ctor)))
+ ctorNode.condensed = true
+
+ var ins, outs []string
+ for _, input := range info.Inputs {
+ ins = append(ins, internal.TrimName(input.String()))
+ }
+ sort.Strings(ins)
+ for _, output := range info.Outputs {
+ outs = append(outs, internal.TrimName(output.String()))
+ }
+ sort.Strings(outs)
+ if len(ins) > 0 {
+ ctorNode.AddLeaf("⇨ %s", strings.Join(ins, ", "))
+ }
+ ctorNode.AddLeaf("⇦ %s", strings.Join(outs, ", "))
+ n.Add(ctorNode)
+ }
+ return n
+}
+
+// Provide constructs a new cell with the given constructors.
+// Constructor is any function that takes zero or more parameters and returns
+// one or more values and optionally an error. For example, the following forms
+// are accepted:
+//
+// func() A
+// func(A, B, C) (D, error).
+//
+// If the constructor depends on a type that is not provided by any constructor
+// the hive will fail to run with an error pointing at the missing type.
+//
+// A constructor can also take as parameter a structure of parameters annotated
+// with `cell.In`, or return a struct annotated with `cell.Out`:
+//
+// type params struct {
+// cell.In
+// Flower *Flower
+// Sun *Sun
+// }
+//
+// type out struct {
+// cell.Out
+// Honey *Honey
+// Nectar *Nectar
+// }
+//
+// func newBee(params) (out, error)
+func Provide(ctors ...any) Cell {
+ return &provider{ctors: ctors, export: true}
+}
+
+// ProvidePrivate is like Provide, but the constructed objects are only
+// available within the module in which it is defined and in nested modules.
+func ProvidePrivate(ctors ...any) Cell {
+ return &provider{ctors: ctors, export: false}
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/hive/command.go b/vendor/github.com/cilium/cilium/pkg/hive/command.go
new file mode 100644
index 000000000..fbc74a223
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/hive/command.go
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package hive
+
+import (
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/cobra"
+
+ "github.com/cilium/cilium/pkg/logging"
+)
+
+// Command constructs the cobra command for hive. The hive
+// command can be used to inspect the dependency graph.
+func (h *Hive) Command() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "hive",
+ Short: "Inspect the hive",
+ Run: func(cmd *cobra.Command, args []string) {
+ // Silence log messages from calling invokes and constructors.
+ logging.SetLogLevel(logrus.WarnLevel)
+ h.PrintObjects()
+ },
+ TraverseChildren: false,
+ }
+ h.RegisterFlags(cmd.PersistentFlags())
+
+ cmd.AddCommand(
+ &cobra.Command{
+ Use: "dot-graph",
+ Short: "Output the dependencies graph in graphviz dot format",
+ Run: func(cmd *cobra.Command, args []string) {
+ h.PrintDotGraph()
+ },
+ TraverseChildren: false,
+ })
+
+ return cmd
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/hive/doc.go b/vendor/github.com/cilium/cilium/pkg/hive/doc.go
new file mode 100644
index 000000000..c10f95e7c
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/hive/doc.go
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+/*
+Package hive provides the infrastructure for building Cilium applications from modular components (cells).
+
+Hive is implemented using the uber/dig library, which provides the dependency injection for
+objects in the hive. It is similar to uber/fx, but adds an opinionated approach to configuration.
+
+The configuration for cells is extracted from Viper. By default the field names are assumed to correspond
+to flag names, e.g. field 'MyOption' corresponds to '--my-option' flag.
+
+The hive constructor, New(), takes the viper instance and the pflag FlagSet as parameters and registers
+the flags from all cells and binds them to viper variables. Once the FlagSet and viper configuration has been
+parsed one can call Populate() to pull the values from viper and construct the application. The hive can
+then be Run().
+
+# Example
+
+For a runnable example see pkg/hive/example.
+
+Try running:
+
+ example$ go run .
+ (ctrl-c stops)
+
+ example$ go run . --dot-graph | dot -Tx11
+
+Try also commenting out cell.Provide lines and seeing what the dependency errors look like.
+*/
+package hive
diff --git a/vendor/github.com/cilium/cilium/pkg/hive/hive.go b/vendor/github.com/cilium/cilium/pkg/hive/hive.go
new file mode 100644
index 000000000..8fb588419
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/hive/hive.go
@@ -0,0 +1,369 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package hive
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "os/signal"
+ "reflect"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/pflag"
+ "github.com/spf13/viper"
+ "go.uber.org/dig"
+
+ "github.com/cilium/cilium/pkg/hive/cell"
+ "github.com/cilium/cilium/pkg/logging"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+)
+
+var (
+ log = logging.DefaultLogger.WithField(logfields.LogSubsys, "hive")
+)
+
+const (
+ // defaultStartTimeout is the amount of time allotted for start hooks. After
+ // this duration the context passed to the start hooks is cancelled.
+ defaultStartTimeout = 5 * time.Minute
+
+ // defaultStopTimeout is the amount of time allotted for stop hooks.
+ defaultStopTimeout = time.Minute
+
+ // defaultEnvPrefix is the default prefix for environment variables, e.g.
+ // flag "foo" can be set with environment variable "CILIUM_FOO".
+ defaultEnvPrefix = "CILIUM_"
+)
+
+// Hive is a framework for building modular applications.
+//
+// It implements dependency injection using the dig library.
+//
+// See pkg/hive/example for a runnable example application.
+type Hive struct {
+ container *dig.Container
+ cells []cell.Cell
+ shutdown chan error
+ envPrefix string
+ startTimeout, stopTimeout time.Duration
+ flags *pflag.FlagSet
+ viper *viper.Viper
+ lifecycle *DefaultLifecycle
+ populated bool
+ invokes []func() error
+ configOverrides []any
+}
+
+// New returns a new hive that can be run, or inspected.
+// The command-line flags from the cells are registered as part of this.
+//
+// The object graph is not constructed until methods of the hive are
+// invoked.
+//
+// Applications should call RegisterFlags() to register the hive's command-line
+// flags. Likewise if configuration settings come from configuration files, then
+// the Viper() method can be used to populate the hive's viper instance.
+func New(cells ...cell.Cell) *Hive {
+ h := &Hive{
+ container: dig.New(),
+ envPrefix: defaultEnvPrefix,
+ cells: cells,
+ viper: viper.New(),
+ startTimeout: defaultStartTimeout,
+ stopTimeout: defaultStopTimeout,
+ flags: pflag.NewFlagSet("", pflag.ContinueOnError),
+ lifecycle: &DefaultLifecycle{},
+ shutdown: make(chan error, 1),
+ configOverrides: nil,
+ }
+
+ if err := h.provideDefaults(); err != nil {
+ log.WithError(err).Fatal("Failed to provide default objects")
+ }
+
+ // Use a single health provider for all cells, which is used to create
+ // module scoped health reporters.
+ if err := h.container.Provide(func(lc Lifecycle) cell.Health {
+ hp := cell.NewHealthProvider()
+ lc.Append(Hook{
+ OnStop: func(ctx HookContext) error {
+ return hp.Stop(ctx)
+ },
+ })
+ return hp
+ }); err != nil {
+ log.WithError(err).Fatal("Failed to provide health provider")
+ }
+
+ // Apply all cells to the container. This registers all constructors
+ // and adds all config flags. Invokes are delayed until Start() is
+ // called.
+ for _, cell := range cells {
+ if err := cell.Apply(h.container); err != nil {
+ log.WithError(err).Fatal("Failed to apply cell")
+ }
+ }
+
+ // Bind the newly registered flags to viper.
+ h.flags.VisitAll(func(f *pflag.Flag) {
+ if err := h.viper.BindPFlag(f.Name, f); err != nil {
+ log.Fatalf("BindPFlag: %s", err)
+ }
+ if err := h.viper.BindEnv(f.Name, h.getEnvName(f.Name)); err != nil {
+ log.Fatalf("BindEnv: %s", err)
+ }
+ })
+
+ return h
+}
+
+// RegisterFlags adds all flags in the hive to the given flag set.
+// Fatals if a flag already exists in the given flag set.
+// Use with e.g. cobra.Command:
+//
+// cmd := &cobra.Command{...}
+// h.RegisterFlags(cmd.Flags())
+func (h *Hive) RegisterFlags(flags *pflag.FlagSet) {
+ h.flags.VisitAll(func(f *pflag.Flag) {
+ if flags.Lookup(f.Name) != nil {
+ log.Fatalf("Error registering flag: '%s' already registered", f.Name)
+ }
+ flags.AddFlag(f)
+ })
+}
+
+// Viper returns the hive's viper instance.
+func (h *Hive) Viper() *viper.Viper {
+ return h.viper
+}
+
+type defaults struct {
+ dig.Out
+
+ Flags *pflag.FlagSet
+ Lifecycle Lifecycle
+ Logger logrus.FieldLogger
+ Shutdowner Shutdowner
+ InvokerList cell.InvokerList
+}
+
+func (h *Hive) provideDefaults() error {
+ return h.container.Provide(func() defaults {
+ return defaults{
+ Flags: h.flags,
+ Lifecycle: h.lifecycle,
+ Logger: log,
+ Shutdowner: h,
+ InvokerList: h,
+ }
+ })
+}
+
+func (h *Hive) SetTimeouts(start, stop time.Duration) {
+ h.startTimeout, h.stopTimeout = start, stop
+}
+
+func (h *Hive) SetEnvPrefix(prefix string) {
+ h.envPrefix = prefix
+}
+
+// AddConfigOverride appends a config override function to modify
+// a configuration after it has been parsed.
+//
+// This method is only meant to be used in tests.
+func AddConfigOverride[Cfg cell.Flagger](h *Hive, override func(*Cfg)) {
+ h.configOverrides = append(h.configOverrides, override)
+}
+
+// Run populates the cell configurations and runs the hive cells.
+// Interrupt signal or call to Shutdowner.Shutdown() will cause the hive to stop.
+func (h *Hive) Run() error {
+ startCtx, cancel := context.WithTimeout(context.Background(), h.startTimeout)
+ defer cancel()
+
+ var errs error
+ if err := h.Start(startCtx); err != nil {
+ errs = errors.Join(errs, fmt.Errorf("failed to start: %w", err))
+ }
+
+ // If start was successful, wait for Shutdown() or interrupt.
+ if errs == nil {
+ errs = errors.Join(errs, h.waitForSignalOrShutdown())
+ }
+
+ stopCtx, cancel := context.WithTimeout(context.Background(), h.stopTimeout)
+ defer cancel()
+
+ if err := h.Stop(stopCtx); err != nil {
+ errs = errors.Join(errs, fmt.Errorf("failed to stop: %w", err))
+ }
+ return errs
+}
+
+func (h *Hive) waitForSignalOrShutdown() error {
+ signals := make(chan os.Signal, 1)
+ defer signal.Stop(signals)
+ signal.Notify(signals, os.Interrupt, syscall.SIGTERM)
+ select {
+ case sig := <-signals:
+ log.WithField("signal", sig).Info("Signal received")
+ return nil
+ case err := <-h.shutdown:
+ return err
+ }
+}
+
+// Populate instantiates the hive. Use for testing that the hive can
+// be instantiated.
+func (h *Hive) Populate() error {
+ if h.populated {
+ return nil
+ }
+ h.populated = true
+
+ // Provide all the parsed settings to the config cells.
+ err := h.container.Provide(
+ func() cell.AllSettings {
+ return cell.AllSettings(h.viper.AllSettings())
+ })
+ if err != nil {
+ return err
+ }
+
+ // Provide config overriders if any
+ for _, o := range h.configOverrides {
+ v := reflect.ValueOf(o)
+ // Check that the config override is of type func(*cfg) and
+ // 'cfg' implements Flagger.
+ t := v.Type()
+ if t.Kind() != reflect.Func || t.NumIn() != 1 {
+ return fmt.Errorf("config override has invalid type %T, expected func(*T)", o)
+ }
+ flaggerType := reflect.TypeOf((*cell.Flagger)(nil)).Elem()
+ if !t.In(0).Implements(flaggerType) {
+ return fmt.Errorf("config override function parameter (%T) does not implement Flagger", o)
+ }
+
+ // Construct the provider function: 'func() func(*cfg)'. This is
+ // picked up by the config cell and called to mutate the config
+ // after it has been parsed.
+ providerFunc := func(in []reflect.Value) []reflect.Value {
+ return []reflect.Value{v}
+ }
+ providerFuncType := reflect.FuncOf(nil, []reflect.Type{t}, false)
+ pfv := reflect.MakeFunc(providerFuncType, providerFunc)
+ if err := h.container.Provide(pfv.Interface()); err != nil {
+ return fmt.Errorf("providing config override failed: %w", err)
+ }
+ }
+
+ // Execute the invoke functions to construct the objects.
+ for _, invoke := range h.invokes {
+ if err := invoke(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (h *Hive) AppendInvoke(invoke func() error) {
+ h.invokes = append(h.invokes, invoke)
+}
+
+// Start starts the hive. The context allows cancelling the start.
+// If context is cancelled and the start hooks do not respect the cancellation
+// then after 5 more seconds the process will be terminated forcefully.
+func (h *Hive) Start(ctx context.Context) error {
+ if err := h.Populate(); err != nil {
+ return err
+ }
+
+ defer close(h.fatalOnTimeout(ctx))
+
+ log.Info("Starting")
+
+ return h.lifecycle.Start(ctx)
+}
+
+// Stop stops the hive. The context allows cancelling the stop.
+// If context is cancelled and the stop hooks do not respect the cancellation
+// then after 5 more seconds the process will be terminated forcefully.
+func (h *Hive) Stop(ctx context.Context) error {
+ defer close(h.fatalOnTimeout(ctx))
+ log.Info("Stopping")
+ return h.lifecycle.Stop(ctx)
+}
+
+func (h *Hive) fatalOnTimeout(ctx context.Context) chan struct{} {
+ terminated := make(chan struct{}, 1)
+ go func() {
+ select {
+ case <-terminated:
+ // Start/stop terminated in time, nothing to do.
+ return
+
+ case <-ctx.Done():
+ }
+
+ // Context was cancelled. Give 5 more seconds and then
+ // go fatal.
+ select {
+ case <-terminated:
+ case <-time.After(5 * time.Second):
+ log.Fatal("Start or stop failed to finish on time, aborting forcefully.")
+ }
+ }()
+ return terminated
+}
+
+// Shutdown implements the Shutdowner interface and is provided
+// for the cells to use for triggering an early shutdown.
+func (h *Hive) Shutdown(opts ...ShutdownOption) {
+ var o shutdownOptions
+ for _, opt := range opts {
+ opt.apply(&o)
+ }
+
+ // If there already is an error in the channel, no-op
+ select {
+ case h.shutdown <- o.err:
+ default:
+ }
+}
+
+func (h *Hive) PrintObjects() {
+ if err := h.Populate(); err != nil {
+ log.WithError(err).Fatal("Failed to populate object graph")
+ }
+
+ fmt.Printf("Cells:\n\n")
+ ip := cell.NewInfoPrinter()
+ for _, c := range h.cells {
+ c.Info(h.container).Print(2, ip)
+ fmt.Println()
+ }
+ h.lifecycle.PrintHooks()
+}
+
+func (h *Hive) PrintDotGraph() {
+ if err := h.Populate(); err != nil {
+ log.WithError(err).Fatal("Failed to populate object graph")
+ }
+
+ if err := dig.Visualize(h.container, os.Stdout); err != nil {
+ log.WithError(err).Fatal("Failed to Visualize()")
+ }
+}
+
+// getEnvName returns the environment variable to be used for the given option name.
+func (h *Hive) getEnvName(option string) string {
+ under := strings.Replace(option, "-", "_", -1)
+ upper := strings.ToUpper(under)
+ return h.envPrefix + upper
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/hive/internal/reflect.go b/vendor/github.com/cilium/cilium/pkg/hive/internal/reflect.go
new file mode 100644
index 000000000..6fef8842e
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/hive/internal/reflect.go
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package internal
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "reflect"
+ "regexp"
+ "runtime"
+ "strings"
+)
+
+var (
+ baseNameRegex = regexp.MustCompile(`github\.com/cilium/cilium/[\w\/]+/`)
+)
+
+func TrimName(name string) string {
+ return string(baseNameRegex.ReplaceAll([]byte(name), []byte{}))
+}
+
+func PrettyType(x any) string {
+ return TrimName(fmt.Sprintf("%T", x))
+}
+
+func FuncNameAndLocation(fn any) string {
+ f := runtime.FuncForPC(reflect.ValueOf(fn).Pointer())
+ file, line := f.FileLine(f.Entry())
+ name := TrimName(f.Name())
+ name = strings.TrimSuffix(name, "-fm")
+ if file != "" {
+ return fmt.Sprintf("%s (%s:%d)", name, usefulPathSegment(file), line)
+ }
+ return name
+}
+
+// Purely a heuristic.
+var commonRoots = map[string]struct{}{
+ "pkg": {},
+ "cmd": {},
+}
+
+func usefulPathSegment(file string) string {
+ p := filepath.Clean(file)
+ segs := strings.Split(p, string(os.PathSeparator))
+ for i := len(segs) - 1; i > 0; i-- {
+ if _, ok := commonRoots[segs[i]]; ok {
+ segs = segs[i:]
+ break
+ }
+ }
+ return filepath.Join(segs...)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/hive/lifecycle.go b/vendor/github.com/cilium/cilium/pkg/hive/lifecycle.go
new file mode 100644
index 000000000..764bd739d
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/hive/lifecycle.go
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package hive
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/cilium/cilium/pkg/hive/internal"
+ "github.com/cilium/cilium/pkg/lock"
+)
+
+// HookContext is a context passed to a lifecycle hook that is cancelled
+// in case of timeout. Hooks that perform long blocking operations directly
+// in the start or stop function (e.g. connecting to external services to
+// initialize) must abort any such operation if this context is cancelled.
+type HookContext context.Context
+
+// Hook is a pair of start and stop callbacks. Both are optional.
+// They're paired up to make sure that on failed start all corresponding
+// stop hooks are executed.
+type Hook struct {
+ OnStart func(HookContext) error
+ OnStop func(HookContext) error
+}
+
+func (h Hook) Start(ctx HookContext) error {
+ if h.OnStart == nil {
+ return nil
+ }
+ return h.OnStart(ctx)
+}
+
+func (h Hook) Stop(ctx HookContext) error {
+ if h.OnStop == nil {
+ return nil
+ }
+ return h.OnStop(ctx)
+}
+
+type HookInterface interface {
+ // Start hook is called when the hive is started.
+ // Returning a non-nil error causes the start to abort and
+ // the stop hooks for already started cells to be called.
+ //
+ // The context is valid only for the duration of the start
+ // and is used to allow aborting of start hook on timeout.
+ Start(HookContext) error
+
+ // Stop hook is called when the hive is stopped or start aborted.
+ // Returning a non-nil error does not abort stopping. The error
+ // is recorded and rest of the stop hooks are executed.
+ Stop(HookContext) error
+}
+
+// Lifecycle enables cells to register start and stop hooks, either
+// from a constructor or an invoke function.
+type Lifecycle interface {
+ Append(HookInterface)
+}
+
+// DefaultLifecycle lifecycle implements a simple lifecycle management that conforms
+// to Lifecycle. It is exported for use in applications that have nested lifecycles
+// (e.g. operator).
+type DefaultLifecycle struct {
+ mu lock.Mutex
+ hooks []HookInterface
+ numStarted int
+}
+
+func (lc *DefaultLifecycle) Append(hook HookInterface) {
+ lc.mu.Lock()
+ defer lc.mu.Unlock()
+
+ lc.hooks = append(lc.hooks, hook)
+}
+
+func (lc *DefaultLifecycle) Start(ctx context.Context) error {
+ lc.mu.Lock()
+ defer lc.mu.Unlock()
+
+ // Wrap the context to make sure it gets cancelled after
+ // start hooks have completed in order to discourage using
+ // the context for unintended purposes.
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ for _, hook := range lc.hooks {
+ fnName, exists := getHookFuncName(hook, true)
+
+ if !exists {
+ // Count as started as there might be a stop hook.
+ lc.numStarted++
+ continue
+ }
+
+ l := log.WithField("function", fnName)
+ l.Debug("Executing start hook")
+ t0 := time.Now()
+ if err := hook.Start(ctx); err != nil {
+ l.WithError(err).Error("Start hook failed")
+ return err
+ }
+ d := time.Since(t0)
+ l.WithField("duration", d).Info("Start hook executed")
+ lc.numStarted++
+ }
+ return nil
+}
+
+func (lc *DefaultLifecycle) Stop(ctx context.Context) error {
+ lc.mu.Lock()
+ defer lc.mu.Unlock()
+
+ // Wrap the context to make sure it gets cancelled after
+ // stop hooks have completed in order to discourage using
+ // the context for unintended purposes.
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ var errs error
+ for ; lc.numStarted > 0; lc.numStarted-- {
+ if ctx.Err() != nil {
+ return ctx.Err()
+ }
+ hook := lc.hooks[lc.numStarted-1]
+
+ fnName, exists := getHookFuncName(hook, false)
+ if !exists {
+ continue
+ }
+ l := log.WithField("function", fnName)
+ l.Debug("Executing stop hook")
+ t0 := time.Now()
+ if err := hook.Stop(ctx); err != nil {
+ l.WithError(err).Error("Stop hook failed")
+ errs = errors.Join(errs, err)
+ } else {
+ d := time.Since(t0)
+ l.WithField("duration", d).Info("Stop hook executed")
+ }
+ }
+ return errs
+}
+
+func (lc *DefaultLifecycle) PrintHooks() {
+ lc.mu.Lock()
+ defer lc.mu.Unlock()
+
+ fmt.Printf("Start hooks:\n\n")
+ for _, hook := range lc.hooks {
+ fnName, exists := getHookFuncName(hook, true)
+ if !exists {
+ continue
+ }
+ fmt.Printf(" • %s\n", fnName)
+ }
+
+ fmt.Printf("\nStop hooks:\n\n")
+ for i := len(lc.hooks) - 1; i >= 0; i-- {
+ hook := lc.hooks[i]
+ fnName, exists := getHookFuncName(hook, false)
+ if !exists {
+ continue
+ }
+ fmt.Printf(" • %s\n", fnName)
+ }
+}
+
+func getHookFuncName(hook HookInterface, start bool) (name string, hasHook bool) {
+ // Ok, we need to get a bit fancy here as runtime.FuncForPC does
+ // not return what we want: we get "hive.Hook.Stop()" when we want
+ // "*foo.Stop(). We do know the concrete type, and we do know
+ // the method name, so we check here whether we're dealing with
+ // "Hook" the struct, or an object implementing HookInterface.
+ //
+ // We could use reflection + FuncForPC to get around this, but it
+ // still wouldn't work for generic types (file would be "")
+ // and the type params would be missing, so instead we'll just use the
+ // type name + method name.
+ switch hook := hook.(type) {
+ case Hook:
+ if start {
+ if hook.OnStart == nil {
+ return "", false
+ }
+ return internal.FuncNameAndLocation(hook.OnStart), true
+ }
+ if hook.OnStop == nil {
+ return "", false
+ }
+ return internal.FuncNameAndLocation(hook.OnStop), true
+
+ default:
+ if start {
+ return internal.PrettyType(hook) + ".Start", true
+ }
+ return internal.PrettyType(hook) + ".Stop", true
+
+ }
+}
+
+var _ Lifecycle = &DefaultLifecycle{}
diff --git a/vendor/github.com/cilium/cilium/pkg/hive/shutdowner.go b/vendor/github.com/cilium/cilium/pkg/hive/shutdowner.go
new file mode 100644
index 000000000..d53ddaf28
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/hive/shutdowner.go
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package hive
+
+// Shutdowner provides Shutdown(), which is a way to trigger stop for hive.
+//
+// To shut down with an error, call Shutdown with ShutdownWithError(err).
+// This error will be returned from Run().
+type Shutdowner interface {
+ Shutdown(...ShutdownOption)
+}
+
+type ShutdownOption interface {
+ apply(*shutdownOptions)
+}
+
+// ShutdownWithError shuts down with an error.
+func ShutdownWithError(err error) ShutdownOption {
+ return optionFunc(func(opts *shutdownOptions) {
+ opts.err = err
+ })
+}
+
+type optionFunc func(*shutdownOptions)
+
+func (fn optionFunc) apply(opts *shutdownOptions) { fn(opts) }
+
+type shutdownOptions struct {
+ err error
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/hubble/api/v1/flow.go b/vendor/github.com/cilium/cilium/pkg/hubble/api/v1/flow.go
index 3dbb73885..b1d5507a7 100644
--- a/vendor/github.com/cilium/cilium/pkg/hubble/api/v1/flow.go
+++ b/vendor/github.com/cilium/cilium/pkg/hubble/api/v1/flow.go
@@ -11,7 +11,7 @@ import (
// FlowProtocol returns the protocol best describing the flow. If available,
// this is the L7 protocol name, then the L4 protocol name.
func FlowProtocol(flow *pb.Flow) string {
- switch flow.GetEventType().Type {
+ switch flow.GetEventType().GetType() {
case monitorAPI.MessageTypeAccessLog:
if l7 := flow.GetL7(); l7 != nil {
switch {
diff --git a/vendor/github.com/cilium/cilium/pkg/hubble/api/v1/interface.go b/vendor/github.com/cilium/cilium/pkg/hubble/api/v1/interface.go
index 314c1ac66..6854993eb 100644
--- a/vendor/github.com/cilium/cilium/pkg/hubble/api/v1/interface.go
+++ b/vendor/github.com/cilium/cilium/pkg/hubble/api/v1/interface.go
@@ -6,6 +6,8 @@ package v1
import (
"github.com/cilium/cilium/pkg/identity"
slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
+ "github.com/cilium/cilium/pkg/labels"
+ "github.com/cilium/cilium/pkg/policy"
)
// EndpointInfo defines readable fields of a Cilium endpoint.
@@ -16,4 +18,5 @@ type EndpointInfo interface {
GetK8sNamespace() string
GetLabels() []string
GetPod() *slim_corev1.Pod
+ GetRealizedPolicyRuleLabelsForKey(key policy.Key) (derivedFrom labels.LabelArrayList, revision uint64, ok bool)
}
diff --git a/vendor/github.com/cilium/cilium/pkg/hubble/filters/http.go b/vendor/github.com/cilium/cilium/pkg/hubble/filters/http.go
index d325e4a29..1025a1049 100644
--- a/vendor/github.com/cilium/cilium/pkg/hubble/filters/http.go
+++ b/vendor/github.com/cilium/cilium/pkg/hubble/filters/http.go
@@ -91,6 +91,33 @@ func filterByHTTPMethods(methods []string) (FilterFunc, error) {
}, nil
}
+func filterByHTTPUrls(urlRegexpStrs []string) (FilterFunc, error) {
+ urlRegexps := make([]*regexp.Regexp, 0, len(urlRegexpStrs))
+ for _, urlRegexpStr := range urlRegexpStrs {
+ urlRegexp, err := regexp.Compile(urlRegexpStr)
+ if err != nil {
+ return nil, fmt.Errorf("%s: %v", urlRegexpStr, err)
+ }
+ urlRegexps = append(urlRegexps, urlRegexp)
+ }
+
+ return func(ev *v1.Event) bool {
+ http := ev.GetFlow().GetL7().GetHttp()
+
+ if http == nil || http.Url == "" {
+ return false
+ }
+
+ for _, urlRegexp := range urlRegexps {
+ if urlRegexp.MatchString(http.Url) {
+ return true
+ }
+ }
+
+ return false
+ }, nil
+}
+
func filterByHTTPPaths(pathRegexpStrs []string) (FilterFunc, error) {
pathRegexps := make([]*regexp.Regexp, 0, len(pathRegexpStrs))
for _, pathRegexpStr := range pathRegexpStrs {
@@ -170,5 +197,18 @@ func (h *HTTPFilter) OnBuildFilter(ctx context.Context, ff *flowpb.FlowFilter) (
fs = append(fs, pathf)
}
+ if ff.GetHttpUrl() != nil {
+ if !httpMatchCompatibleEventFilter(ff.GetEventType()) {
+ return nil, errors.New("filtering by http url requires " +
+ "the event type filter to only match 'l7' events")
+ }
+
+ pathf, err := filterByHTTPUrls(ff.GetHttpUrl())
+ if err != nil {
+ return nil, fmt.Errorf("invalid http url filter: %v", err)
+ }
+ fs = append(fs, pathf)
+ }
+
return fs, nil
}
diff --git a/vendor/github.com/cilium/cilium/pkg/iana/svcname.go b/vendor/github.com/cilium/cilium/pkg/iana/svcname.go
new file mode 100644
index 000000000..f888b87e1
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/iana/svcname.go
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package iana
+
+import (
+ "regexp"
+)
+
+// IANA Service Name consists of alphanumeric characters of which at
+// least one is not a number, as well as non-consecutive dashes ('-')
+// except for in the beginning or the end.
+// Note: Character case must be ignored when comparing service names.
+var isSvcName = regexp.MustCompile(`^([a-zA-Z0-9]-?)*[a-zA-Z](-?[a-zA-Z0-9])*$`).MatchString
+
+// IsSvcName returns true if the string conforms to IANA Service Name specification
+// (RFC 6335 Section 5.1. Service Name Syntax)
+func IsSvcName(name string) bool {
+ return len(name) > 0 && len(name) <= 15 && isSvcName(name)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/identity/cache/allocator.go b/vendor/github.com/cilium/cilium/pkg/identity/cache/allocator.go
new file mode 100644
index 000000000..afd27d440
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/identity/cache/allocator.go
@@ -0,0 +1,567 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package cache
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net"
+ "net/netip"
+ "path"
+
+ "github.com/sirupsen/logrus"
+
+ "github.com/cilium/cilium/pkg/allocator"
+ "github.com/cilium/cilium/pkg/identity"
+ "github.com/cilium/cilium/pkg/identity/key"
+ "github.com/cilium/cilium/pkg/idpool"
+ clientset "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned"
+ "github.com/cilium/cilium/pkg/k8s/identitybackend"
+ "github.com/cilium/cilium/pkg/kvstore"
+ kvstoreallocator "github.com/cilium/cilium/pkg/kvstore/allocator"
+ "github.com/cilium/cilium/pkg/labels"
+ "github.com/cilium/cilium/pkg/lock"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+ "github.com/cilium/cilium/pkg/metrics"
+ "github.com/cilium/cilium/pkg/option"
+ "github.com/cilium/cilium/pkg/stream"
+)
+
+var (
+ // IdentitiesPath is the path to where identities are stored in the
+ // key-value store.
+ IdentitiesPath = path.Join(kvstore.BaseKeyPrefix, "state", "identities", "v1")
+)
+
+// CachingIdentityAllocator manages the allocation of identities for both
+// global and local identities.
+type CachingIdentityAllocator struct {
+ // IdentityAllocator is an allocator for security identities from the
+ // kvstore.
+ IdentityAllocator *allocator.Allocator
+
+ // globalIdentityAllocatorInitialized is closed whenever the global identity
+ // allocator is initialized.
+ globalIdentityAllocatorInitialized chan struct{}
+
+ localIdentities *localIdentityCache
+
+ localNodeIdentities *localIdentityCache
+
+ identitiesPath string
+
+ // This field exists is to hand out references that are either for sending
+ // and receiving. It should not be used directly without converting it first
+ // to a AllocatorEventSendChan or AllocatorEventRecvChan.
+ events allocator.AllocatorEventChan
+ watcher identityWatcher
+
+ // setupMutex synchronizes InitIdentityAllocator() and Close()
+ setupMutex lock.Mutex
+
+ owner IdentityAllocatorOwner
+}
+
+// IdentityAllocatorOwner is the interface the owner of an identity allocator
+// must implement
+type IdentityAllocatorOwner interface {
+ // UpdateIdentities will be called when identities have changed
+ //
+ // The caller is responsible for making sure the same identity
+ // is not present in both 'added' and 'deleted', so that they
+ // can be processed in either order.
+ UpdateIdentities(added, deleted IdentityCache)
+
+ // GetSuffix must return the node specific suffix to use
+ GetNodeSuffix() string
+}
+
+// IdentityAllocator is any type which is responsible for allocating security
+// identities based of sets of labels, and caching information about identities
+// locally.
+type IdentityAllocator interface {
+ // Identity changes are observable.
+ stream.Observable[IdentityChange]
+
+ // WaitForInitialGlobalIdentities waits for the initial set of global
+ // security identities to have been received.
+ WaitForInitialGlobalIdentities(context.Context) error
+
+ // AllocateIdentity allocates an identity described by the specified labels.
+ // A possible previously used numeric identity for these labels can be passed
+ // in as the last parameter; identity.InvalidIdentity must be passed if no
+ // previous numeric identity exists.
+ AllocateIdentity(context.Context, labels.Labels, bool, identity.NumericIdentity) (*identity.Identity, bool, error)
+
+ // Release is the reverse operation of AllocateIdentity() and releases the
+ // specified identity.
+ Release(context.Context, *identity.Identity, bool) (released bool, err error)
+
+ // ReleaseSlice is the slice variant of Release().
+ ReleaseSlice(context.Context, []*identity.Identity) error
+
+ // LookupIdentityByID returns the identity that corresponds to the given
+ // labels.
+ LookupIdentity(ctx context.Context, lbls labels.Labels) *identity.Identity
+
+ // LookupIdentityByID returns the identity that corresponds to the given
+ // numeric identity.
+ LookupIdentityByID(ctx context.Context, id identity.NumericIdentity) *identity.Identity
+
+ // GetIdentityCache returns the current cache of identities that the
+ // allocator has allocated. The caller should not modify the resulting
+ // identities by pointer.
+ GetIdentityCache() IdentityCache
+
+ // GetIdentities returns a copy of the current cache of identities.
+ GetIdentities() IdentitiesModel
+
+ // AllocateCIDRsForIPs attempts to allocate identities for a list of
+ // CIDRs. If any allocation fails, all allocations are rolled back and
+ // the error is returned. When an identity is freshly allocated for a
+ // CIDR, it is added to the ipcache if 'newlyAllocatedIdentities' is
+ // 'nil', otherwise the newly allocated identities are placed in
+ // 'newlyAllocatedIdentities' and it is the caller's responsibility to
+ // upsert them into ipcache by calling UpsertGeneratedIdentities().
+ //
+ // Upon success, the caller must also arrange for the resulting identities to
+ // be released via a subsequent call to ReleaseCIDRIdentitiesByID().
+ //
+ // The implementation for this function currently lives in pkg/ipcache.
+ AllocateCIDRsForIPs(ips []net.IP, newlyAllocatedIdentities map[netip.Prefix]*identity.Identity) ([]*identity.Identity, error)
+
+ // ReleaseCIDRIdentitiesByID() is a wrapper for ReleaseSlice() that
+ // also handles ipcache entries.
+ ReleaseCIDRIdentitiesByID(context.Context, []identity.NumericIdentity)
+}
+
+// InitIdentityAllocator creates the global identity allocator. Only the first
+// invocation of this function will have an effect. The Caller must have
+// initialized well known identities before calling this (by calling
+// identity.InitWellKnownIdentities()).
+// The client is only used by the CRD identity allocator currently.
+// Returns a channel which is closed when initialization of the allocator is
+// completed.
+// TODO: identity backends are initialized directly in this function, pulling
+// in dependencies on kvstore and k8s. It would be better to decouple this,
+// since the backends are an interface.
+func (m *CachingIdentityAllocator) InitIdentityAllocator(client clientset.Interface) <-chan struct{} {
+ m.setupMutex.Lock()
+ defer m.setupMutex.Unlock()
+
+ if m.IdentityAllocator != nil {
+ log.Panic("InitIdentityAllocator() in succession without calling Close()")
+ }
+
+ log.Info("Initializing identity allocator")
+
+ minID := idpool.ID(identity.MinimalAllocationIdentity)
+ maxID := idpool.ID(identity.MaximumAllocationIdentity)
+
+ log.WithFields(map[string]interface{}{
+ "min": minID,
+ "max": maxID,
+ "cluster-id": option.Config.ClusterID,
+ }).Info("Allocating identities between range")
+
+ // In the case of the allocator being closed, we need to create a new events channel
+ // and start a new watch.
+ if m.events == nil {
+ m.events = make(allocator.AllocatorEventChan, eventsQueueSize)
+ m.watcher.watch(m.events)
+ }
+
+ // Asynchronously set up the global identity allocator since it connects
+ // to the kvstore.
+ go func(owner IdentityAllocatorOwner, events allocator.AllocatorEventSendChan, minID, maxID idpool.ID) {
+ m.setupMutex.Lock()
+ defer m.setupMutex.Unlock()
+
+ var (
+ backend allocator.Backend
+ err error
+ )
+
+ switch option.Config.IdentityAllocationMode {
+ case option.IdentityAllocationModeKVstore:
+ log.Debug("Identity allocation backed by KVStore")
+ backend, err = kvstoreallocator.NewKVStoreBackend(m.identitiesPath, owner.GetNodeSuffix(), &key.GlobalIdentity{}, kvstore.Client())
+ if err != nil {
+ log.WithError(err).Fatal("Unable to initialize kvstore backend for identity allocation")
+ }
+
+ case option.IdentityAllocationModeCRD:
+ log.Debug("Identity allocation backed by CRD")
+ backend, err = identitybackend.NewCRDBackend(identitybackend.CRDBackendConfiguration{
+ Store: nil,
+ Client: client,
+ KeyFunc: (&key.GlobalIdentity{}).PutKeyFromMap,
+ })
+ if err != nil {
+ log.WithError(err).Fatal("Unable to initialize Kubernetes CRD backend for identity allocation")
+ }
+
+ default:
+ log.Fatalf("Unsupported identity allocation mode %s", option.Config.IdentityAllocationMode)
+ }
+
+ a, err := allocator.NewAllocator(&key.GlobalIdentity{}, backend,
+ allocator.WithMax(maxID), allocator.WithMin(minID),
+ allocator.WithEvents(events),
+ allocator.WithMasterKeyProtection(),
+ allocator.WithPrefixMask(idpool.ID(option.Config.ClusterID< identity.MaxNumericIdentity {
+ return nil, false, fmt.Errorf("%d: numeric identity too large", idp)
+ }
+
+ if option.Config.Debug {
+ log.WithFields(logrus.Fields{
+ logfields.Identity: idp,
+ logfields.IdentityLabels: lbls.String(),
+ "isNew": isNew,
+ "isNewLocally": isNewLocally,
+ }).Debug("Resolved identity")
+ }
+
+ return identity.NewIdentity(identity.NumericIdentity(idp), lbls), isNew, nil
+}
+
+// Release is the reverse operation of AllocateIdentity() and releases the
+// identity again. This function may result in kvstore operations.
+// After the last user has released the ID, the returned lastUse value is true.
+func (m *CachingIdentityAllocator) Release(ctx context.Context, id *identity.Identity, notifyOwner bool) (released bool, err error) {
+ defer func() {
+ if released {
+ if id.ID.HasLocalScope() {
+ metrics.Identity.WithLabelValues(identity.NodeLocalIdentityType).Dec()
+ } else if id.ID.HasRemoteNodeScope() {
+ metrics.Identity.WithLabelValues(identity.RemoteNodeIdentityType).Dec()
+ } else if id.ID.IsReservedIdentity() {
+ metrics.Identity.WithLabelValues(identity.ReservedIdentityType).Dec()
+ } else {
+ metrics.Identity.WithLabelValues(identity.ClusterLocalIdentityType).Dec()
+ }
+ }
+ if m.owner != nil && released && notifyOwner {
+ deleted := IdentityCache{
+ id.ID: id.LabelArray,
+ }
+ m.owner.UpdateIdentities(nil, deleted)
+ }
+ }()
+
+ // Ignore reserved identities.
+ if id.IsReserved() {
+ return false, nil
+ }
+
+ switch identity.ScopeForLabels(id.Labels) {
+ case identity.IdentityScopeLocal:
+ return m.localIdentities.release(id), nil
+ case identity.IdentityScopeRemoteNode:
+ return m.localNodeIdentities.release(id), nil
+ }
+
+ // This will block until the kvstore can be accessed and all identities
+ // were successfully synced
+ err = m.WaitForInitialGlobalIdentities(ctx)
+ if err != nil {
+ return false, err
+ }
+
+ if m.IdentityAllocator == nil {
+ return false, fmt.Errorf("allocator not initialized")
+ }
+
+ // Rely on the eventual Kv-Store events for delete
+ // notifications of kv-store allocated identities. Even if an
+ // ID is no longer used locally, it may still be used by
+ // remote nodes, so we can't rely on the locally computed
+ // "lastUse".
+ return m.IdentityAllocator.Release(ctx, &key.GlobalIdentity{LabelArray: id.LabelArray})
+}
+
+// ReleaseSlice attempts to release a set of identities. It is a helper
+// function that may be useful for cleaning up multiple identities in paths
+// where several identities may be allocated and another error means that they
+// should all be released.
+func (m *CachingIdentityAllocator) ReleaseSlice(ctx context.Context, identities []*identity.Identity) error {
+ var err error
+ for _, id := range identities {
+ if id == nil {
+ continue
+ }
+ _, err2 := m.Release(ctx, id, false)
+ if err2 != nil {
+ log.WithError(err2).WithFields(logrus.Fields{
+ logfields.Identity: id,
+ }).Error("Failed to release identity")
+ err = err2
+ }
+ }
+ return err
+}
+
+// WatchRemoteIdentities returns a RemoteCache instance which can be later
+// started to watch identities in another kvstore and sync them to the local
+// identity cache. remoteName should be unique unless replacing an existing
+// remote's backend. When cachedPrefix is set, identities are assumed to be
+// stored under the "cilium/cache" prefix, and the watcher is adapted accordingly.
+func (m *CachingIdentityAllocator) WatchRemoteIdentities(remoteName string, backend kvstore.BackendOperations, cachedPrefix bool) (*allocator.RemoteCache, error) {
+ <-m.globalIdentityAllocatorInitialized
+
+ prefix := m.identitiesPath
+ if cachedPrefix {
+ prefix = path.Join(kvstore.StateToCachePrefix(prefix), remoteName)
+ }
+
+ remoteAllocatorBackend, err := kvstoreallocator.NewKVStoreBackend(prefix, m.owner.GetNodeSuffix(), &key.GlobalIdentity{}, backend)
+ if err != nil {
+ return nil, fmt.Errorf("error setting up remote allocator backend: %s", err)
+ }
+
+ remoteAlloc, err := allocator.NewAllocator(&key.GlobalIdentity{}, remoteAllocatorBackend,
+ allocator.WithEvents(m.IdentityAllocator.GetEvents()), allocator.WithoutGC(), allocator.WithoutAutostart())
+ if err != nil {
+ return nil, fmt.Errorf("unable to initialize remote Identity Allocator: %s", err)
+ }
+
+ return m.IdentityAllocator.NewRemoteCache(remoteName, remoteAlloc), nil
+}
+
+func (m *CachingIdentityAllocator) RemoveRemoteIdentities(name string) {
+ if m.IdentityAllocator != nil {
+ m.IdentityAllocator.RemoveRemoteKVStore(name)
+ }
+}
+
+type IdentityChangeKind string
+
+const (
+ IdentityChangeSync IdentityChangeKind = IdentityChangeKind(allocator.AllocatorChangeSync)
+ IdentityChangeUpsert IdentityChangeKind = IdentityChangeKind(allocator.AllocatorChangeUpsert)
+ IdentityChangeDelete IdentityChangeKind = IdentityChangeKind(allocator.AllocatorChangeDelete)
+)
+
+type IdentityChange struct {
+ Kind IdentityChangeKind
+ ID identity.NumericIdentity
+ Labels labels.Labels
+}
+
+// Observe the identity changes. Conforms to stream.Observable.
+// Replays the current state of the cache when subscribing.
+func (m *CachingIdentityAllocator) Observe(ctx context.Context, next func(IdentityChange), complete func(error)) {
+ // This short-lived go routine serves the purpose of waiting for the global identity allocator becoming ready
+ // before starting to observe the underlying allocator for changes.
+ // m.IdentityAllocator is backed by a stream.FuncObservable, that will start its own
+ // go routine. Therefore, the current go routine will stop and free the lock on the setupMutex after the registration.
+ go func() {
+ if err := m.WaitForInitialGlobalIdentities(ctx); err != nil {
+ complete(ctx.Err())
+ return
+ }
+
+ m.setupMutex.Lock()
+ defer m.setupMutex.Unlock()
+
+ if m.IdentityAllocator == nil {
+ complete(errors.New("allocator no longer initialized"))
+ return
+ }
+
+ // Observe the underlying allocator for changes and map the events to identities.
+ stream.Map[allocator.AllocatorChange, IdentityChange](
+ m.IdentityAllocator,
+ func(change allocator.AllocatorChange) IdentityChange {
+ return IdentityChange{
+ Kind: IdentityChangeKind(change.Kind),
+ ID: identity.NumericIdentity(change.ID),
+ Labels: mapLabels(change.Key),
+ }
+ },
+ ).Observe(ctx, next, complete)
+ }()
+}
+
+func mapLabels(allocatorKey allocator.AllocatorKey) labels.Labels {
+ var idLabels labels.Labels = nil
+
+ if allocatorKey != nil {
+ idLabels = labels.Labels{}
+ for k, v := range allocatorKey.GetAsMap() {
+ label := labels.ParseLabel(k + "=" + v)
+ idLabels[label.Key] = label
+ }
+ }
+
+ return idLabels
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/identity/cache/cache.go b/vendor/github.com/cilium/cilium/pkg/identity/cache/cache.go
new file mode 100644
index 000000000..7a0b4c194
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/identity/cache/cache.go
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package cache
+
+import (
+ "context"
+ "reflect"
+
+ "github.com/cilium/cilium/api/v1/models"
+ "github.com/cilium/cilium/pkg/allocator"
+ "github.com/cilium/cilium/pkg/identity"
+ "github.com/cilium/cilium/pkg/identity/key"
+ identitymodel "github.com/cilium/cilium/pkg/identity/model"
+ "github.com/cilium/cilium/pkg/idpool"
+ "github.com/cilium/cilium/pkg/kvstore"
+ "github.com/cilium/cilium/pkg/labels"
+ "github.com/cilium/cilium/pkg/logging"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+)
+
+var (
+ log = logging.DefaultLogger.WithField(logfields.LogSubsys, "identity-cache")
+)
+
+// IdentityCache is a cache of identity to labels mapping
+type IdentityCache map[identity.NumericIdentity]labels.LabelArray
+
+// IdentitiesModel is a wrapper so that we can implement the sort.Interface
+// to sort the slice by ID
+type IdentitiesModel []*models.Identity
+
+// Less returns true if the element in index `i` is lower than the element
+// in index `j`
+func (s IdentitiesModel) Less(i, j int) bool {
+ return s[i].ID < s[j].ID
+}
+
+// FromIdentityCache populates the provided model from an identity cache.
+func (s IdentitiesModel) FromIdentityCache(cache IdentityCache) IdentitiesModel {
+ for id, lbls := range cache {
+ s = append(s, identitymodel.CreateModel(&identity.Identity{
+ ID: id,
+ Labels: lbls.Labels(),
+ }))
+ }
+ return s
+}
+
+// GetIdentityCache returns a cache of all known identities
+func (m *CachingIdentityAllocator) GetIdentityCache() IdentityCache {
+ log.Debug("getting identity cache for identity allocator manager")
+ cache := IdentityCache{}
+
+ if m.isGlobalIdentityAllocatorInitialized() {
+ m.IdentityAllocator.ForeachCache(func(id idpool.ID, val allocator.AllocatorKey) {
+ if val != nil {
+ if gi, ok := val.(*key.GlobalIdentity); ok {
+ cache[identity.NumericIdentity(id)] = gi.LabelArray
+ } else {
+ log.Warningf("Ignoring unknown identity type '%s': %+v",
+ reflect.TypeOf(val), val)
+ }
+ }
+ })
+ }
+
+ identity.IterateReservedIdentities(func(ni identity.NumericIdentity, id *identity.Identity) {
+ cache[ni] = id.Labels.LabelArray()
+ })
+
+ for _, identity := range m.localIdentities.GetIdentities() {
+ cache[identity.ID] = identity.Labels.LabelArray()
+ }
+ for _, identity := range m.localNodeIdentities.GetIdentities() {
+ cache[identity.ID] = identity.Labels.LabelArray()
+ }
+
+ return cache
+}
+
+// GetIdentities returns all known identities
+func (m *CachingIdentityAllocator) GetIdentities() IdentitiesModel {
+ identities := IdentitiesModel{}
+
+ if m.isGlobalIdentityAllocatorInitialized() {
+ m.IdentityAllocator.ForeachCache(func(id idpool.ID, val allocator.AllocatorKey) {
+ if gi, ok := val.(*key.GlobalIdentity); ok {
+ identity := identity.NewIdentityFromLabelArray(identity.NumericIdentity(id), gi.LabelArray)
+ identities = append(identities, identitymodel.CreateModel(identity))
+ }
+
+ })
+ }
+ identity.IterateReservedIdentities(func(ni identity.NumericIdentity, id *identity.Identity) {
+ identities = append(identities, identitymodel.CreateModel(id))
+ })
+
+ for _, v := range m.localIdentities.GetIdentities() {
+ identities = append(identities, identitymodel.CreateModel(v))
+ }
+ for _, v := range m.localNodeIdentities.GetIdentities() {
+ identities = append(identities, identitymodel.CreateModel(v))
+ }
+
+ return identities
+}
+
+type identityWatcher struct {
+ owner IdentityAllocatorOwner
+}
+
+// collectEvent records the 'event' as an added or deleted identity,
+// and makes sure that any identity is present in only one of the sets
+// (added or deleted).
+func collectEvent(event allocator.AllocatorEvent, added, deleted IdentityCache) bool {
+ id := identity.NumericIdentity(event.ID)
+ // Only create events have the key
+ if event.Typ == kvstore.EventTypeCreate {
+ if gi, ok := event.Key.(*key.GlobalIdentity); ok {
+ // Un-delete the added ID if previously
+ // 'deleted' so that collected events can be
+ // processed in any order.
+ delete(deleted, id)
+ added[id] = gi.LabelArray
+ return true
+ }
+ log.Warningf("collectEvent: Ignoring unknown identity type '%s': %+v",
+ reflect.TypeOf(event.Key), event.Key)
+ return false
+ }
+ // Reverse an add when subsequently deleted
+ delete(added, id)
+ // record the id deleted even if an add was reversed, as the
+ // id may also have previously existed, in which case the
+ // result is not no-op!
+ deleted[id] = labels.LabelArray{}
+
+ return true
+}
+
+// watch starts the identity watcher
+func (w *identityWatcher) watch(events allocator.AllocatorEventRecvChan) {
+
+ go func() {
+ for {
+ added := IdentityCache{}
+ deleted := IdentityCache{}
+ First:
+ for {
+ event, ok := <-events
+ // Wait for one identity add or delete or stop
+ if !ok {
+ // 'events' was closed
+ return
+ }
+ // Collect first added and deleted labels
+ switch event.Typ {
+ case kvstore.EventTypeCreate, kvstore.EventTypeDelete:
+ if collectEvent(event, added, deleted) {
+ // First event collected
+ break First
+ }
+ default:
+ // Ignore modify events
+ }
+ }
+
+ More:
+ for {
+ // see if there is more, but do not wait nor stop
+ select {
+ case event, ok := <-events:
+ if !ok {
+ // 'events' was closed
+ break More
+ }
+ // Collect more added and deleted labels
+ switch event.Typ {
+ case kvstore.EventTypeCreate, kvstore.EventTypeDelete:
+ collectEvent(event, added, deleted)
+ default:
+ // Ignore modify events
+ }
+ default:
+ // No more events available without blocking
+ break More
+ }
+ }
+ // Issue collected updates
+ w.owner.UpdateIdentities(added, deleted) // disjoint sets
+ }
+ }()
+}
+
+// isGlobalIdentityAllocatorInitialized returns true if m.IdentityAllocator is not nil.
+// Note: This does not mean that the identities have been synchronized,
+// see WaitForInitialGlobalIdentities to wait for a fully populated cache.
+func (m *CachingIdentityAllocator) isGlobalIdentityAllocatorInitialized() bool {
+ select {
+ case <-m.globalIdentityAllocatorInitialized:
+ return m.IdentityAllocator != nil
+ default:
+ return false
+ }
+}
+
+// LookupIdentity looks up the identity by its labels but does not create it.
+// This function will first search through the local cache, then the caches for
+// remote kvstores and finally fall back to the main kvstore.
+// May return nil for lookups if the allocator has not yet been synchronized.
+func (m *CachingIdentityAllocator) LookupIdentity(ctx context.Context, lbls labels.Labels) *identity.Identity {
+ if reservedIdentity := identity.LookupReservedIdentityByLabels(lbls); reservedIdentity != nil {
+ return reservedIdentity
+ }
+
+ switch identity.ScopeForLabels(lbls) {
+ case identity.IdentityScopeLocal:
+ return m.localIdentities.lookup(lbls)
+ case identity.IdentityScopeRemoteNode:
+ return m.localNodeIdentities.lookup(lbls)
+ }
+
+ if !m.isGlobalIdentityAllocatorInitialized() {
+ return nil
+ }
+
+ lblArray := lbls.LabelArray()
+ id, err := m.IdentityAllocator.GetIncludeRemoteCaches(ctx, &key.GlobalIdentity{LabelArray: lblArray})
+ if err != nil {
+ return nil
+ }
+ if id > identity.MaxNumericIdentity {
+ return nil
+ }
+
+ if id == idpool.NoID {
+ return nil
+ }
+
+ return identity.NewIdentityFromLabelArray(identity.NumericIdentity(id), lblArray)
+}
+
+var unknownIdentity = identity.NewIdentity(identity.IdentityUnknown, labels.Labels{labels.IDNameUnknown: labels.NewLabel(labels.IDNameUnknown, "", labels.LabelSourceReserved)})
+
+// LookupIdentityByID returns the identity by ID. This function will first
+// search through the local cache, then the caches for remote kvstores and
+// finally fall back to the main kvstore
+// May return nil for lookups if the allocator has not yet been synchronized.
+func (m *CachingIdentityAllocator) LookupIdentityByID(ctx context.Context, id identity.NumericIdentity) *identity.Identity {
+ if id == identity.IdentityUnknown {
+ return unknownIdentity
+ }
+
+ if identity := identity.LookupReservedIdentity(id); identity != nil {
+ return identity
+ }
+
+ switch id.Scope() {
+ case identity.IdentityScopeLocal:
+ return m.localIdentities.lookupByID(id)
+ case identity.IdentityScopeRemoteNode:
+ return m.localNodeIdentities.lookupByID(id)
+ }
+
+ if !m.isGlobalIdentityAllocatorInitialized() {
+ return nil
+ }
+
+ allocatorKey, err := m.IdentityAllocator.GetByIDIncludeRemoteCaches(ctx, idpool.ID(id))
+ if err != nil {
+ return nil
+ }
+
+ if gi, ok := allocatorKey.(*key.GlobalIdentity); ok {
+ return identity.NewIdentityFromLabelArray(id, gi.LabelArray)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/identity/cache/local.go b/vendor/github.com/cilium/cilium/pkg/identity/cache/local.go
new file mode 100644
index 000000000..28fa130b5
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/identity/cache/local.go
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package cache
+
+import (
+ "fmt"
+
+ "github.com/cilium/cilium/pkg/allocator"
+ "github.com/cilium/cilium/pkg/identity"
+ "github.com/cilium/cilium/pkg/identity/key"
+ "github.com/cilium/cilium/pkg/idpool"
+ "github.com/cilium/cilium/pkg/kvstore"
+ "github.com/cilium/cilium/pkg/labels"
+ "github.com/cilium/cilium/pkg/lock"
+)
+
+type localIdentityCache struct {
+ mutex lock.RWMutex
+ identitiesByID map[identity.NumericIdentity]*identity.Identity
+ identitiesByLabels map[string]*identity.Identity
+ nextNumericIdentity identity.NumericIdentity
+ scope identity.NumericIdentity
+ minID identity.NumericIdentity
+ maxID identity.NumericIdentity
+ events allocator.AllocatorEventSendChan
+}
+
+func newLocalIdentityCache(scope, minID, maxID identity.NumericIdentity, events allocator.AllocatorEventSendChan) *localIdentityCache {
+ return &localIdentityCache{
+ identitiesByID: map[identity.NumericIdentity]*identity.Identity{},
+ identitiesByLabels: map[string]*identity.Identity{},
+ nextNumericIdentity: minID,
+ scope: scope,
+ minID: minID,
+ maxID: maxID,
+ events: events,
+ }
+}
+
+func (l *localIdentityCache) bumpNextNumericIdentity() {
+ if l.nextNumericIdentity == l.maxID {
+ l.nextNumericIdentity = l.minID
+ } else {
+ l.nextNumericIdentity++
+ }
+}
+
+// getNextFreeNumericIdentity returns the next available numeric identity or an error
+// If idCandidate has the local scope and is available, it will be returned instead of
+// searching for a new numeric identity.
+// The l.mutex must be held
+func (l *localIdentityCache) getNextFreeNumericIdentity(idCandidate identity.NumericIdentity) (identity.NumericIdentity, error) {
+ // Try first with the given candidate
+ if idCandidate.Scope() == l.scope {
+ if _, taken := l.identitiesByID[idCandidate]; !taken {
+ // let nextNumericIdentity be, allocated identities will be skipped anyway
+ log.Debugf("Reallocated restored local identity: %d", idCandidate)
+ return idCandidate, nil
+ }
+ }
+ firstID := l.nextNumericIdentity
+ for {
+ idCandidate = l.nextNumericIdentity | l.scope
+ if _, taken := l.identitiesByID[idCandidate]; !taken {
+ l.bumpNextNumericIdentity()
+ return idCandidate, nil
+ }
+
+ l.bumpNextNumericIdentity()
+ if l.nextNumericIdentity == firstID {
+ return 0, fmt.Errorf("out of local identity space")
+ }
+ }
+}
+
+// lookupOrCreate searches for the existence of a local identity with the given
+// labels. If it exists, the reference count is incremented and the identity is
+// returned. If it does not exist, a new identity is created with a unique
+// numeric identity. All identities returned by lookupOrCreate() must be
+// released again via localIdentityCache.release().
+// A possible previously used numeric identity for these labels can be passed
+// in as the 'oldNID' parameter; identity.InvalidIdentity must be passed if no
+// previous numeric identity exists. 'oldNID' will be reallocated if available.
+func (l *localIdentityCache) lookupOrCreate(lbls labels.Labels, oldNID identity.NumericIdentity) (*identity.Identity, bool, error) {
+ // Not converting to string saves an allocation, as byte key lookups into
+ // string maps are optimized by the compiler, see
+ // https://github.com/golang/go/issues/3512.
+ repr := lbls.SortedList()
+
+ l.mutex.Lock()
+ defer l.mutex.Unlock()
+
+ if id, ok := l.identitiesByLabels[string(repr)]; ok {
+ id.ReferenceCount++
+ return id, false, nil
+ }
+
+ numericIdentity, err := l.getNextFreeNumericIdentity(oldNID)
+ if err != nil {
+ return nil, false, err
+ }
+
+ id := &identity.Identity{
+ ID: numericIdentity,
+ Labels: lbls,
+ LabelArray: lbls.LabelArray(),
+ ReferenceCount: 1,
+ }
+
+ l.identitiesByLabels[string(repr)] = id
+ l.identitiesByID[numericIdentity] = id
+
+ if l.events != nil {
+ l.events <- allocator.AllocatorEvent{
+ Typ: kvstore.EventTypeCreate,
+ ID: idpool.ID(id.ID),
+ Key: &key.GlobalIdentity{LabelArray: id.LabelArray},
+ }
+ }
+
+ return id, true, nil
+}
+
+// release releases a local identity from the cache. true is returned when the
+// last use of the identity has been released and the identity has been
+// forgotten.
+func (l *localIdentityCache) release(id *identity.Identity) bool {
+ l.mutex.Lock()
+ defer l.mutex.Unlock()
+
+ if id, ok := l.identitiesByID[id.ID]; ok {
+ switch {
+ case id.ReferenceCount > 1:
+ id.ReferenceCount--
+ return false
+
+ case id.ReferenceCount == 1:
+ // Release is only attempted once, when the reference count is
+ // hitting the last use
+ delete(l.identitiesByLabels, string(id.Labels.SortedList()))
+ delete(l.identitiesByID, id.ID)
+
+ if l.events != nil {
+ l.events <- allocator.AllocatorEvent{
+ Typ: kvstore.EventTypeDelete,
+ ID: idpool.ID(id.ID),
+ }
+ }
+
+ return true
+ }
+ }
+
+ return false
+}
+
+// lookup searches for a local identity matching the given labels and returns
+// it. If found, the reference count is NOT incremented and thus release must
+// NOT be called.
+func (l *localIdentityCache) lookup(lbls labels.Labels) *identity.Identity {
+ l.mutex.RLock()
+ defer l.mutex.RUnlock()
+
+ if id, ok := l.identitiesByLabels[string(lbls.SortedList())]; ok {
+ return id
+ }
+
+ return nil
+}
+
+// lookupByID searches for a local identity matching the given ID and returns
+// it. If found, the reference count is NOT incremented and thus release must
+// NOT be called.
+func (l *localIdentityCache) lookupByID(id identity.NumericIdentity) *identity.Identity {
+ l.mutex.RLock()
+ defer l.mutex.RUnlock()
+
+ if id, ok := l.identitiesByID[id]; ok {
+ return id
+ }
+
+ return nil
+}
+
+// GetIdentities returns all local identities
+func (l *localIdentityCache) GetIdentities() map[identity.NumericIdentity]*identity.Identity {
+ cache := map[identity.NumericIdentity]*identity.Identity{}
+
+ l.mutex.RLock()
+ defer l.mutex.RUnlock()
+
+ for key, id := range l.identitiesByID {
+ cache[key] = id
+ }
+
+ return cache
+}
+
+// close removes the events channel.
+func (l *localIdentityCache) close() {
+ l.mutex.Lock()
+ defer l.mutex.Unlock()
+
+ l.events = nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/identity/identity.go b/vendor/github.com/cilium/cilium/pkg/identity/identity.go
index bd00be515..9f45d5154 100644
--- a/vendor/github.com/cilium/cilium/pkg/identity/identity.go
+++ b/vendor/github.com/cilium/cilium/pkg/identity/identity.go
@@ -9,6 +9,7 @@ import (
"strconv"
"github.com/cilium/cilium/pkg/labels"
+ "github.com/cilium/cilium/pkg/option"
)
const (
@@ -16,6 +17,7 @@ const (
ReservedIdentityType = "reserved"
ClusterLocalIdentityType = "cluster_local"
WellKnownIdentityType = "well_known"
+ RemoteNodeIdentityType = "remote_node"
)
// Identity is the representation of the security context for a particular set of
@@ -180,18 +182,33 @@ func (pair *IPIdentityPair) PrefixString() string {
// RequiresGlobalIdentity returns true if the label combination requires a
// global identity
func RequiresGlobalIdentity(lbls labels.Labels) bool {
- needsGlobal := true
+ return ScopeForLabels(lbls) == IdentityScopeGlobal
+}
+
+// ScopeForLabels returns the identity scope to be used for the label set.
+// If all labels are either CIDR or reserved, then returns the CIDR scope.
+// Note: This assumes the caller has already called LookupReservedIdentityByLabels;
+// it does not handle that case.
+func ScopeForLabels(lbls labels.Labels) NumericIdentity {
+ scope := IdentityScopeGlobal
+
+ // If this is a remote node, return the remote node scope.
+ // Note that this is not reachable when policy-cidr-selects-nodes is false, since
+ // callers will already have gotten a value from LookupReservedIdentityByLabels.
+ if lbls.Has(labels.LabelRemoteNode[labels.IDNameRemoteNode]) {
+ return IdentityScopeRemoteNode
+ }
for _, label := range lbls {
switch label.Source {
case labels.LabelSourceCIDR, labels.LabelSourceReserved:
- needsGlobal = false
+ scope = IdentityScopeLocal
default:
- return true
+ return IdentityScopeGlobal
}
}
- return needsGlobal
+ return scope
}
// AddUserDefinedNumericIdentitySet adds all key-value pairs from the given map
@@ -224,72 +241,59 @@ func LookupReservedIdentityByLabels(lbls labels.Labels) *Identity {
return identity
}
- for _, lbl := range lbls {
- var createID bool
- switch {
+ // Check if a fixed identity exists.
+ if lbl, exists := lbls[labels.LabelKeyFixedIdentity]; exists {
// If the set of labels contain a fixed identity then and exists in
// the map of reserved IDs then return the identity of that reserved ID.
- case lbl.Key == labels.LabelKeyFixedIdentity:
- id := GetReservedID(lbl.Value)
- if id != IdentityUnknown && IsUserReservedIdentity(id) {
- return LookupReservedIdentity(id)
- }
- // If a fixed identity was not found then we return nil to avoid
- // falling to a reserved identity.
- return nil
+ id := GetReservedID(lbl.Value)
+ if id != IdentityUnknown && IsUserReservedIdentity(id) {
+ return LookupReservedIdentity(id)
+ }
+ // If a fixed identity was not found then we return nil to avoid
+ // falling to a reserved identity.
+ return nil
+ }
+
+ // If there is no reserved label, return nil.
+ if !lbls.IsReserved() {
+ return nil
+ }
- case lbl.Source == labels.LabelSourceReserved:
- id := GetReservedID(lbl.Key)
- switch {
- case id == ReservedIdentityKubeAPIServer && lbls.Has(labels.LabelHost[labels.IDNameHost]):
- // Due to Golang map iteration order (random) we might get the
- // ID returned as kube-apiserver. If there's a local host
- // label, then we know this is local host reserved ID, so
- // change it as such. All local host traffic should always be
- // considered host (and not kube-apiserver).
- //
- // The kube-apiserver label can be a part of a few identities:
- // * host
- // * kube-apiserver reserved identity (contains remote-node
- // label)
- // * (maybe) CIDR
- id = ReservedIdentityHost
- fallthrough
- case id == ReservedIdentityKubeAPIServer && lbls.Has(labels.LabelRemoteNode[labels.IDNameRemoteNode]):
- createID = true
-
- case id == ReservedIdentityRemoteNode && lbls.Has(labels.LabelKubeAPIServer[labels.IDNameKubeAPIServer]):
- // Due to Golang map iteration order (random) we might get the
- // ID returned as remote-node. If there's a kube-apiserver
- // label, then we know this is kube-apiserver reserved ID, so
- // change it as such. Only traffic to non-kube-apiserver nodes
- // should be considered as remote-node.
- id = ReservedIdentityKubeAPIServer
- fallthrough
- case id == ReservedIdentityHost || id == ReservedIdentityRemoteNode:
- // If it contains the reserved, local host or remote node
- // identity, return it with the new list of labels. This is to
- // ensure that the local node or remote node retain their
- // identity regardless of label changes.
- createID = true
- }
-
- if createID {
- return NewIdentity(id, lbls)
- }
-
- // If it doesn't contain a fixed-identity then make sure the set of
- // labels only contains a single label and that label is of the
- // reserved type. This is to prevent users from adding
- // cilium-reserved labels into the workloads.
- if len(lbls) != 1 {
- return nil
- }
- if id != IdentityUnknown && !IsUserReservedIdentity(id) {
- return LookupReservedIdentity(id)
- }
+ var nid NumericIdentity
+ if lbls.Has(labels.LabelHost[labels.IDNameHost]) {
+ nid = ReservedIdentityHost
+ } else if lbls.Has(labels.LabelRemoteNode[labels.IDNameRemoteNode]) {
+ // If selecting remote-nodes via CIDR policies is allowed, then
+ // they no longer have a reserved identity.
+ if option.Config.PolicyCIDRMatchesNodes() {
+ return nil
+ }
+ nid = ReservedIdentityRemoteNode
+ if lbls.Has(labels.LabelKubeAPIServer[labels.IDNameKubeAPIServer]) {
+ // If there's a kube-apiserver label, then we know this is
+ // kube-apiserver reserved ID, so change it as such.
+ // Only traffic from non-kube-apiserver nodes should be
+ // considered as remote-node.
+ nid = ReservedIdentityKubeAPIServer
}
}
+
+ if nid != IdentityUnknown {
+ return NewIdentity(nid, lbls)
+ }
+
+ // We have handled all the cases where multiple labels can be present.
+ // So, we make sure the set of labels only contains a single label and
+ // that label is of the reserved type. This is to prevent users from
+ // adding cilium-reserved labels into the workloads.
+ if len(lbls) != 1 {
+ return nil
+ }
+
+ nid = GetReservedID(lbls.ToSlice()[0].Key)
+ if nid != IdentityUnknown && !IsUserReservedIdentity(nid) {
+ return LookupReservedIdentity(nid)
+ }
return nil
}
diff --git a/vendor/github.com/cilium/cilium/pkg/identity/identitymanager/doc.go b/vendor/github.com/cilium/cilium/pkg/identity/identitymanager/doc.go
new file mode 100644
index 000000000..fbf1b56fa
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/identity/identitymanager/doc.go
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Package identitymanager tracks which global identities are being used by
+// the currently running cilium-agent
+package identitymanager
diff --git a/vendor/github.com/cilium/cilium/pkg/identity/identitymanager/log.go b/vendor/github.com/cilium/cilium/pkg/identity/identitymanager/log.go
new file mode 100644
index 000000000..b038e68ce
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/identity/identitymanager/log.go
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package identitymanager
+
+import (
+ "github.com/cilium/cilium/pkg/logging"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+)
+
+var (
+ log = logging.DefaultLogger.WithField(logfields.LogSubsys, "identitymanager")
+)
diff --git a/vendor/github.com/cilium/cilium/pkg/identity/identitymanager/manager.go b/vendor/github.com/cilium/cilium/pkg/identity/identitymanager/manager.go
new file mode 100644
index 000000000..81766a099
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/identity/identitymanager/manager.go
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package identitymanager
+
+import (
+ "github.com/sirupsen/logrus"
+
+ "github.com/cilium/cilium/api/v1/models"
+ "github.com/cilium/cilium/pkg/identity"
+ "github.com/cilium/cilium/pkg/identity/model"
+ "github.com/cilium/cilium/pkg/lock"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+)
+
+var (
+ // GlobalIdentityManager is a singleton instance of an IdentityManager, used
+ // for easy updating / tracking lifecycles of identities on the local node
+ // without having to pass around a specific instance of an IdentityManager
+ // throughout Cilium.
+ GlobalIdentityManager = NewIdentityManager()
+)
+
+// IdentityManager caches information about a set of identities, currently a
+// reference count of how many users there are for each identity.
+type IdentityManager struct {
+ mutex lock.RWMutex
+ identities map[identity.NumericIdentity]*identityMetadata
+ observers map[Observer]struct{}
+}
+
+type identityMetadata struct {
+ identity *identity.Identity
+ refCount uint
+}
+
+// NewIdentityManager returns an initialized IdentityManager.
+func NewIdentityManager() *IdentityManager {
+ return &IdentityManager{
+ identities: make(map[identity.NumericIdentity]*identityMetadata),
+ observers: make(map[Observer]struct{}),
+ }
+}
+
+// Add inserts the identity into the GlobalIdentityManager.
+func Add(identity *identity.Identity) {
+ GlobalIdentityManager.Add(identity)
+}
+
+// Remove deletes the identity from the GlobalIdentityManager.
+func Remove(identity *identity.Identity) {
+ GlobalIdentityManager.Remove(identity)
+}
+
+// RemoveAll deletes all identities from the GlobalIdentityManager.
+func RemoveAll() {
+ GlobalIdentityManager.RemoveAll()
+}
+
+// Add inserts the identity into the identity manager. If the identity is
+// already in the identity manager, the reference count for the identity is
+// incremented.
+func (idm *IdentityManager) Add(identity *identity.Identity) {
+ log.WithFields(logrus.Fields{
+ logfields.Identity: identity,
+ }).Debug("Adding identity to the identity manager")
+
+ idm.mutex.Lock()
+ defer idm.mutex.Unlock()
+ idm.add(identity)
+}
+
+func (idm *IdentityManager) add(identity *identity.Identity) {
+
+ if identity == nil {
+ return
+ }
+
+ idMeta, exists := idm.identities[identity.ID]
+ if !exists {
+ idm.identities[identity.ID] = &identityMetadata{
+ identity: identity,
+ refCount: 1,
+ }
+ for o := range idm.observers {
+ o.LocalEndpointIdentityAdded(identity)
+ }
+
+ } else {
+ idMeta.refCount++
+ }
+}
+
+// RemoveOldAddNew removes old from the identity manager and inserts new
+// into the IdentityManager.
+// Caller must have previously added the old identity with Add().
+// This is a no-op if both identities have the same numeric ID.
+func (idm *IdentityManager) RemoveOldAddNew(old, new *identity.Identity) {
+ idm.mutex.Lock()
+ defer idm.mutex.Unlock()
+
+ if old == nil && new == nil {
+ return
+ }
+ // The host endpoint will always retain its reserved ID, but its labels may
+ // change so we need to update its identity.
+ if old != nil && new != nil && old.ID == new.ID && new.ID != identity.ReservedIdentityHost {
+ return
+ }
+
+ log.WithFields(logrus.Fields{
+ "old": old,
+ "new": new,
+ }).Debug("removing old and adding new identity")
+
+ idm.remove(old)
+ idm.add(new)
+}
+
+// RemoveOldAddNew removes old from and inserts new into the
+// GlobalIdentityManager.
+func RemoveOldAddNew(old, new *identity.Identity) {
+ GlobalIdentityManager.RemoveOldAddNew(old, new)
+}
+
+// RemoveAll removes all identities.
+func (idm *IdentityManager) RemoveAll() {
+ idm.mutex.Lock()
+ defer idm.mutex.Unlock()
+
+ for id := range idm.identities {
+ idm.remove(idm.identities[id].identity)
+ }
+}
+
+// Remove deletes the identity from the identity manager. If the identity is
+// already in the identity manager, the reference count for the identity is
+// decremented. If the identity is not in the cache, this is a no-op. If the
+// ref count becomes zero, the identity is removed from the cache.
+func (idm *IdentityManager) Remove(identity *identity.Identity) {
+ log.WithFields(logrus.Fields{
+ logfields.Identity: identity,
+ }).Debug("Removing identity from the identity manager")
+
+ idm.mutex.Lock()
+ defer idm.mutex.Unlock()
+ idm.remove(identity)
+}
+
+func (idm *IdentityManager) remove(identity *identity.Identity) {
+
+ if identity == nil {
+ return
+ }
+
+ idMeta, exists := idm.identities[identity.ID]
+ if !exists {
+ log.WithFields(logrus.Fields{
+ logfields.Identity: identity,
+ }).Error("removing identity not added to the identity manager!")
+ return
+ }
+ idMeta.refCount--
+ if idMeta.refCount == 0 {
+ delete(idm.identities, identity.ID)
+ for o := range idm.observers {
+ o.LocalEndpointIdentityRemoved(identity)
+ }
+ }
+
+}
+
+// GetIdentityModels returns the API representation of the IdentityManager.
+func (idm *IdentityManager) GetIdentityModels() []*models.IdentityEndpoints {
+ idm.mutex.RLock()
+ defer idm.mutex.RUnlock()
+
+ identities := make([]*models.IdentityEndpoints, 0, len(idm.identities))
+
+ for _, v := range idm.identities {
+ identities = append(identities, &models.IdentityEndpoints{
+ Identity: model.CreateModel(v.identity),
+ RefCount: int64(v.refCount),
+ })
+ }
+
+ return identities
+}
+
+func (idm *IdentityManager) subscribe(o Observer) {
+ idm.mutex.Lock()
+ defer idm.mutex.Unlock()
+ idm.observers[o] = struct{}{}
+}
+
+// GetIdentityModels returns the API model of all identities in the
+// GlobalIdentityManager.
+func GetIdentityModels() []*models.IdentityEndpoints {
+ return GlobalIdentityManager.GetIdentityModels()
+}
+
+// IdentitiesModel is a wrapper so that we can implement the sort.Interface
+// to sort the slice by ID
+type IdentitiesModel []*models.IdentityEndpoints
+
+// Less returns true if the element in index `i` is lower than the element
+// in index `j`
+func (s IdentitiesModel) Less(i, j int) bool {
+ return s[i].Identity.ID < s[j].Identity.ID
+}
+
+// Subscribe adds the specified Observer to the global identity manager, to be
+// notified upon changes to local identity usage.
+func Subscribe(o Observer) {
+ GlobalIdentityManager.subscribe(o)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/identity/identitymanager/observer.go b/vendor/github.com/cilium/cilium/pkg/identity/identitymanager/observer.go
new file mode 100644
index 000000000..c5eacc078
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/identity/identitymanager/observer.go
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package identitymanager
+
+import (
+ "github.com/cilium/cilium/pkg/identity"
+)
+
+// Observer can sign up to receive events whenever local identities are removed.
+type Observer interface {
+ // LocalEndpointIdentityAdded is called when an identity first becomes
+ // used on the node. Implementations must ensure that the callback
+ // returns within a reasonable period.
+ LocalEndpointIdentityAdded(*identity.Identity)
+
+ // LocalEndpointIdentityRemoved is called when an identity is no longer
+ // in use on the node. Implementations must ensure that the callback
+ // returns within a reasonable period.
+ LocalEndpointIdentityRemoved(*identity.Identity)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/identity/key/global_identity.go b/vendor/github.com/cilium/cilium/pkg/identity/key/global_identity.go
new file mode 100644
index 000000000..fcbb8ed62
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/identity/key/global_identity.go
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package key
+
+import (
+ "strings"
+
+ "github.com/cilium/cilium/pkg/allocator"
+ "github.com/cilium/cilium/pkg/labels"
+)
+
+// GlobalIdentity is the structure used to store an identity
+type GlobalIdentity struct {
+ labels.LabelArray
+}
+
+// GetKey encodes an Identity as string
+func (gi *GlobalIdentity) GetKey() string {
+ var str strings.Builder
+ for _, l := range gi.LabelArray {
+ str.Write(l.FormatForKVStore())
+ }
+ return str.String()
+}
+
+// GetAsMap encodes a GlobalIdentity as a map of keys to values. The keys will
+// include a source delimited by a ':'. This output is parseable by PutKeyFromMap.
+func (gi *GlobalIdentity) GetAsMap() map[string]string {
+ return gi.StringMap()
+}
+
+// PutKey decodes an Identity from its string representation
+func (gi *GlobalIdentity) PutKey(v string) allocator.AllocatorKey {
+ return &GlobalIdentity{labels.NewLabelArrayFromSortedList(v)}
+}
+
+// PutKeyFromMap decodes an Identity from a map of key to value. Output
+// from GetAsMap can be parsed.
+// Note: NewLabelArrayFromMap will parse the ':' separated label source from
+// the keys because the source parameter is ""
+func (gi *GlobalIdentity) PutKeyFromMap(v map[string]string) allocator.AllocatorKey {
+ return &GlobalIdentity{labels.Map2Labels(v, "").LabelArray()}
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/identity/model/identity.go b/vendor/github.com/cilium/cilium/pkg/identity/model/identity.go
new file mode 100644
index 000000000..4b99df3aa
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/identity/model/identity.go
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package model
+
+import (
+ "github.com/cilium/cilium/api/v1/models"
+ "github.com/cilium/cilium/pkg/identity"
+ "github.com/cilium/cilium/pkg/labels"
+)
+
+func NewIdentityFromModel(base *models.Identity) *identity.Identity {
+ if base == nil {
+ return nil
+ }
+
+ id := &identity.Identity{
+ ID: identity.NumericIdentity(base.ID),
+ Labels: make(labels.Labels, len(base.Labels)),
+ }
+ for _, v := range base.Labels {
+ lbl := labels.ParseLabel(v)
+ id.Labels[lbl.Key] = lbl
+ }
+ id.Sanitize()
+
+ return id
+}
+
+func CreateModel(id *identity.Identity) *models.Identity {
+ if id == nil {
+ return nil
+ }
+
+ ret := &models.Identity{
+ ID: int64(id.ID),
+ Labels: make([]string, 0, len(id.Labels)),
+ }
+
+ for _, v := range id.Labels {
+ ret.Labels = append(ret.Labels, v.String())
+ }
+ return ret
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/identity/numericidentity.go b/vendor/github.com/cilium/cilium/pkg/identity/numericidentity.go
index 841acd296..cb5526408 100644
--- a/vendor/github.com/cilium/cilium/pkg/identity/numericidentity.go
+++ b/vendor/github.com/cilium/cilium/pkg/identity/numericidentity.go
@@ -7,9 +7,12 @@ import (
"errors"
"fmt"
"math"
+ "net"
"sort"
"strconv"
+ "unsafe"
+ cmtypes "github.com/cilium/cilium/pkg/clustermesh/types"
api "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
"github.com/cilium/cilium/pkg/labels"
"github.com/cilium/cilium/pkg/lock"
@@ -21,9 +24,28 @@ const (
// shifted
ClusterIDShift = 16
- // LocalIdentityFlag is the bit in the numeric identity that identifies
- // a numeric identity to have local scope
- LocalIdentityFlag = NumericIdentity(1 << 24)
+ // Identities also have scopes, which is defined by the high 8 bits.
+ // 0x00 -- Global and reserved identities. Reserved identities are
+ // not allocated like global identities, but are known
+ // because they are hardcoded in Cilium. Older versions of
+ // Cilium will not be aware of any "new" reserved identities
+ // that are added.
+ // 0x01 -- local (CIDR) identities
+ // 0x02 -- remote nodes
+
+ // IdentityScopeMask is the top 8 bits of the 32 bit identity
+ IdentityScopeMask = NumericIdentity(0xFF_00_00_00)
+
+ // IdentityScopeGlobal is the identity scope used by global and reserved identities.
+ IdentityScopeGlobal = NumericIdentity(0)
+
+ // IdentityScopeLocal is the tag in the numeric identity that identifies
+ // a numeric identity to have local (CIDR) scope.
+ IdentityScopeLocal = NumericIdentity(1 << 24)
+
+ // IdentityScopeRemoteNode is the tag in the numeric identity that identifies
+ // an identity to be a remote in-cluster node.
+ IdentityScopeRemoteNode = NumericIdentity(2 << 24)
// MinAllocatorLocalIdentity represents the minimal numeric identity
// that the localIdentityCache allocator can allocate for a local (CIDR)
@@ -36,7 +58,7 @@ const (
// MinLocalIdentity represents the actual minimal numeric identity value
// for a local (CIDR) identity.
- MinLocalIdentity = MinAllocatorLocalIdentity | LocalIdentityFlag
+ MinLocalIdentity = MinAllocatorLocalIdentity | IdentityScopeLocal
// MaxAllocatorLocalIdentity represents the maximal numeric identity
// that the localIdentityCache allocator can allocate for a local (CIDR)
@@ -49,7 +71,7 @@ const (
// MaxLocalIdentity represents the actual maximal numeric identity value
// for a local (CIDR) identity.
- MaxLocalIdentity = MaxAllocatorLocalIdentity | LocalIdentityFlag
+ MaxLocalIdentity = MaxAllocatorLocalIdentity | IdentityScopeLocal
// MinimalNumericIdentity represents the minimal numeric identity not
// used for reserved purposes.
@@ -105,6 +127,14 @@ const (
// ReservedIdentityIngress is the identity given to the IP used as the source
// address for connections from Ingress proxies.
ReservedIdentityIngress
+
+ // ReservedIdentityWorldIPv4 represents any endpoint outside of the cluster
+ // for IPv4 address only.
+ ReservedIdentityWorldIPv4
+
+ // ReservedIdentityWorldIPv6 represents any endpoint outside of the cluster
+ // for IPv6 address only.
+ ReservedIdentityWorldIPv6
)
// Special identities for well-known cluster components
@@ -200,14 +230,16 @@ func (w wellKnownIdentities) lookupByNumericIdentity(identity NumericIdentity) *
}
type Configuration interface {
- LocalClusterName() string
CiliumNamespaceName() string
- LocalClusterID() uint32
+}
+
+func k8sLabel(key string, value string) string {
+ return "k8s:" + key + "=" + value
}
// InitWellKnownIdentities establishes all well-known identities. Returns the
// number of well-known identities initialized.
-func InitWellKnownIdentities(c Configuration) int {
+func InitWellKnownIdentities(c Configuration, cinfo cmtypes.ClusterInfo) int {
// etcd-operator labels
// k8s:io.cilium.k8s.policy.serviceaccount=cilium-etcd-sa
// k8s:io.kubernetes.pod.namespace=
@@ -215,13 +247,13 @@ func InitWellKnownIdentities(c Configuration) int {
// k8s:io.cilium.k8s.policy.cluster=default
etcdOperatorLabels := []string{
"k8s:io.cilium/app=etcd-operator",
- fmt.Sprintf("k8s:%s=%s", api.PodNamespaceLabel, c.CiliumNamespaceName()),
- fmt.Sprintf("k8s:%s=cilium-etcd-sa", api.PolicyLabelServiceAccount),
- fmt.Sprintf("k8s:%s=%s", api.PolicyLabelCluster, c.LocalClusterName()),
+ k8sLabel(api.PodNamespaceLabel, c.CiliumNamespaceName()),
+ k8sLabel(api.PolicyLabelServiceAccount, "cilium-etcd-sa"),
+ k8sLabel(api.PolicyLabelCluster, cinfo.Name),
}
WellKnown.add(ReservedETCDOperator, etcdOperatorLabels)
WellKnown.add(ReservedETCDOperator2, append(etcdOperatorLabels,
- fmt.Sprintf("k8s:%s=%s", api.PodNamespaceMetaNameLabel, c.CiliumNamespaceName())))
+ k8sLabel(api.PodNamespaceMetaNameLabel, c.CiliumNamespaceName())))
// cilium-etcd labels
// k8s:app=etcd
@@ -237,13 +269,13 @@ func InitWellKnownIdentities(c Configuration) int {
"k8s:app=etcd",
"k8s:etcd_cluster=cilium-etcd",
"k8s:io.cilium/app=etcd-operator",
- fmt.Sprintf("k8s:%s=%s", api.PodNamespaceLabel, c.CiliumNamespaceName()),
- fmt.Sprintf("k8s:%s=default", api.PolicyLabelServiceAccount),
- fmt.Sprintf("k8s:%s=%s", api.PolicyLabelCluster, c.LocalClusterName()),
+ k8sLabel(api.PodNamespaceLabel, c.CiliumNamespaceName()),
+ k8sLabel(api.PolicyLabelServiceAccount, "default"),
+ k8sLabel(api.PolicyLabelCluster, cinfo.Name),
}
WellKnown.add(ReservedCiliumKVStore, ciliumEtcdLabels)
WellKnown.add(ReservedCiliumKVStore2, append(ciliumEtcdLabels,
- fmt.Sprintf("k8s:%s=%s", api.PodNamespaceMetaNameLabel, c.CiliumNamespaceName())))
+ k8sLabel(api.PodNamespaceMetaNameLabel, c.CiliumNamespaceName())))
// kube-dns labels
// k8s:io.cilium.k8s.policy.serviceaccount=kube-dns
@@ -252,13 +284,13 @@ func InitWellKnownIdentities(c Configuration) int {
// k8s:io.cilium.k8s.policy.cluster=default
kubeDNSLabels := []string{
"k8s:k8s-app=kube-dns",
- fmt.Sprintf("k8s:%s=kube-system", api.PodNamespaceLabel),
- fmt.Sprintf("k8s:%s=kube-dns", api.PolicyLabelServiceAccount),
- fmt.Sprintf("k8s:%s=%s", api.PolicyLabelCluster, c.LocalClusterName()),
+ k8sLabel(api.PodNamespaceLabel, "kube-system"),
+ k8sLabel(api.PolicyLabelServiceAccount, "kube-dns"),
+ k8sLabel(api.PolicyLabelCluster, cinfo.Name),
}
WellKnown.add(ReservedKubeDNS, kubeDNSLabels)
WellKnown.add(ReservedKubeDNS2, append(kubeDNSLabels,
- fmt.Sprintf("k8s:%s=kube-system", api.PodNamespaceMetaNameLabel)))
+ k8sLabel(api.PodNamespaceMetaNameLabel, "kube-system")))
// kube-dns EKS labels
// k8s:io.cilium.k8s.policy.serviceaccount=kube-dns
@@ -269,13 +301,13 @@ func InitWellKnownIdentities(c Configuration) int {
eksKubeDNSLabels := []string{
"k8s:k8s-app=kube-dns",
"k8s:eks.amazonaws.com/component=kube-dns",
- fmt.Sprintf("k8s:%s=kube-system", api.PodNamespaceLabel),
- fmt.Sprintf("k8s:%s=kube-dns", api.PolicyLabelServiceAccount),
- fmt.Sprintf("k8s:%s=%s", api.PolicyLabelCluster, c.LocalClusterName()),
+ k8sLabel(api.PodNamespaceLabel, "kube-system"),
+ k8sLabel(api.PolicyLabelServiceAccount, "kube-dns"),
+ k8sLabel(api.PolicyLabelCluster, cinfo.Name),
}
WellKnown.add(ReservedEKSKubeDNS, eksKubeDNSLabels)
WellKnown.add(ReservedEKSKubeDNS2, append(eksKubeDNSLabels,
- fmt.Sprintf("k8s:%s=kube-system", api.PodNamespaceMetaNameLabel)))
+ k8sLabel(api.PodNamespaceMetaNameLabel, "kube-system")))
// CoreDNS EKS labels
// k8s:io.cilium.k8s.policy.serviceaccount=coredns
@@ -286,13 +318,13 @@ func InitWellKnownIdentities(c Configuration) int {
eksCoreDNSLabels := []string{
"k8s:k8s-app=kube-dns",
"k8s:eks.amazonaws.com/component=coredns",
- fmt.Sprintf("k8s:%s=kube-system", api.PodNamespaceLabel),
- fmt.Sprintf("k8s:%s=coredns", api.PolicyLabelServiceAccount),
- fmt.Sprintf("k8s:%s=%s", api.PolicyLabelCluster, c.LocalClusterName()),
+ k8sLabel(api.PodNamespaceLabel, "kube-system"),
+ k8sLabel(api.PolicyLabelServiceAccount, "coredns"),
+ k8sLabel(api.PolicyLabelCluster, cinfo.Name),
}
WellKnown.add(ReservedEKSCoreDNS, eksCoreDNSLabels)
WellKnown.add(ReservedEKSCoreDNS2, append(eksCoreDNSLabels,
- fmt.Sprintf("k8s:%s=kube-system", api.PodNamespaceMetaNameLabel)))
+ k8sLabel(api.PodNamespaceMetaNameLabel, "kube-system")))
// CoreDNS labels
// k8s:io.cilium.k8s.policy.serviceaccount=coredns
@@ -301,13 +333,13 @@ func InitWellKnownIdentities(c Configuration) int {
// k8s:io.cilium.k8s.policy.cluster=default
coreDNSLabels := []string{
"k8s:k8s-app=kube-dns",
- fmt.Sprintf("k8s:%s=kube-system", api.PodNamespaceLabel),
- fmt.Sprintf("k8s:%s=coredns", api.PolicyLabelServiceAccount),
- fmt.Sprintf("k8s:%s=%s", api.PolicyLabelCluster, c.LocalClusterName()),
+ k8sLabel(api.PodNamespaceLabel, "kube-system"),
+ k8sLabel(api.PolicyLabelServiceAccount, "coredns"),
+ k8sLabel(api.PolicyLabelCluster, cinfo.Name),
}
WellKnown.add(ReservedCoreDNS, coreDNSLabels)
WellKnown.add(ReservedCoreDNS2, append(coreDNSLabels,
- fmt.Sprintf("k8s:%s=kube-system", api.PodNamespaceMetaNameLabel)))
+ k8sLabel(api.PodNamespaceMetaNameLabel, "kube-system")))
// CiliumOperator labels
// k8s:io.cilium.k8s.policy.serviceaccount=cilium-operator
@@ -322,13 +354,13 @@ func InitWellKnownIdentities(c Configuration) int {
"k8s:io.cilium/app=operator",
"k8s:app.kubernetes.io/part-of=cilium",
"k8s:app.kubernetes.io/name=cilium-operator",
- fmt.Sprintf("k8s:%s=%s", api.PodNamespaceLabel, c.CiliumNamespaceName()),
- fmt.Sprintf("k8s:%s=cilium-operator", api.PolicyLabelServiceAccount),
- fmt.Sprintf("k8s:%s=%s", api.PolicyLabelCluster, c.LocalClusterName()),
+ k8sLabel(api.PodNamespaceLabel, c.CiliumNamespaceName()),
+ k8sLabel(api.PolicyLabelServiceAccount, "cilium-operator"),
+ k8sLabel(api.PolicyLabelCluster, cinfo.Name),
}
WellKnown.add(ReservedCiliumOperator, ciliumOperatorLabels)
WellKnown.add(ReservedCiliumOperator2, append(ciliumOperatorLabels,
- fmt.Sprintf("k8s:%s=%s", api.PodNamespaceMetaNameLabel, c.CiliumNamespaceName())))
+ k8sLabel(api.PodNamespaceMetaNameLabel, c.CiliumNamespaceName())))
// cilium-etcd-operator labels
// k8s:io.cilium.k8s.policy.cluster=default
@@ -343,29 +375,29 @@ func InitWellKnownIdentities(c Configuration) int {
"k8s:io.cilium/app=etcd-operator",
"k8s:app.kubernetes.io/name: cilium-etcd-operator",
"k8s:app.kubernetes.io/part-of: cilium",
- fmt.Sprintf("k8s:%s=%s", api.PodNamespaceLabel, c.CiliumNamespaceName()),
- fmt.Sprintf("k8s:%s=cilium-etcd-operator", api.PolicyLabelServiceAccount),
- fmt.Sprintf("k8s:%s=%s", api.PolicyLabelCluster, c.LocalClusterName()),
+ k8sLabel(api.PodNamespaceLabel, c.CiliumNamespaceName()),
+ k8sLabel(api.PolicyLabelServiceAccount, "cilium-etcd-operator"),
+ k8sLabel(api.PolicyLabelCluster, cinfo.Name),
}
WellKnown.add(ReservedCiliumEtcdOperator, ciliumEtcdOperatorLabels)
WellKnown.add(ReservedCiliumEtcdOperator2, append(ciliumEtcdOperatorLabels,
- fmt.Sprintf("k8s:%s=%s", api.PodNamespaceMetaNameLabel, c.CiliumNamespaceName())))
+ k8sLabel(api.PodNamespaceMetaNameLabel, c.CiliumNamespaceName())))
- InitMinMaxIdentityAllocation(c)
+ InitMinMaxIdentityAllocation(c, cinfo)
return len(WellKnown)
}
// InitMinMaxIdentityAllocation sets the minimal and maximum for identities that
// should be allocated in the cluster.
-func InitMinMaxIdentityAllocation(c Configuration) {
- if c.LocalClusterID() > 0 {
+func InitMinMaxIdentityAllocation(c Configuration, cinfo cmtypes.ClusterInfo) {
+ if cinfo.ID > 0 {
// For ClusterID > 0, the identity range just starts from cluster shift,
// no well-known-identities need to be reserved from the range.
- MinimalAllocationIdentity = NumericIdentity((1 << ClusterIDShift) * c.LocalClusterID())
+ MinimalAllocationIdentity = NumericIdentity((1 << ClusterIDShift) * cinfo.ID)
// The maximum identity also needs to be recalculated as ClusterID
// may be overwritten by runtime parameters.
- MaximumAllocationIdentity = NumericIdentity((1< 0 {
+ return p, s.AvailableAddresses
+ }
+ }
+ }
+
+ for poolID, s := range m {
+ if s.AvailableAddresses > 0 {
+ return PoolID(poolID), s.AvailableAddresses
+ }
+ }
+
+ return PoolNotExists, 0
+}
+
+// VirtualNetwork is the representation of a virtual network
+type VirtualNetwork struct {
+ // ID is the ID of the virtual network
+ ID string
+
+ // PrimaryCIDR is the primary IPv4 CIDR
+ PrimaryCIDR string
+
+ // CIDRs is the list of secondary IPv4 CIDR ranges associated with the VPC
+ CIDRs []string
+}
+
+// VirtualNetworkMap indexes virtual networks by their ID
+type VirtualNetworkMap map[string]*VirtualNetwork
+
+// PoolNotExists indicate that no such pool ID exists
+const PoolNotExists = PoolID("")
+
+// PoolUnspec indicates that the pool ID is unspecified
+const PoolUnspec = PoolNotExists
+
+// PoolID is the type used to identify an IPAM pool
+type PoolID string
+
+// PoolQuota defines the limits of an IPAM pool
+type PoolQuota struct {
+ // AvailabilityZone is the availability zone in which the IPAM pool resides in
+ AvailabilityZone string
+
+ // AvailableIPs is the number of available IPs in the pool
+ AvailableIPs int
+}
+
+// PoolQuotaMap is a map of pool quotas indexes by pool identifier
+type PoolQuotaMap map[PoolID]PoolQuota
+
+// Interface is the implementation of a IPAM relevant network interface
+// +k8s:deepcopy-gen=false
+// +deepequal-gen=false
+type Interface interface {
+ // InterfaceID must return the identifier of the interface
+ InterfaceID() string
+
+ // ForeachAddress must iterate over all addresses of the interface and
+ // call fn for each address
+ ForeachAddress(instanceID string, fn AddressIterator) error
+}
+
+// InterfaceRevision is the configurationr revision of a network interface. It
+// consists of a revision hash representing the current configuration version
+// and the resource itself.
+//
+// +k8s:deepcopy-gen=false
+// +deepequal-gen=false
+type InterfaceRevision struct {
+ // Resource is the interface resource
+ Resource Interface
+
+ // Fingerprint is the fingerprint reprsenting the network interface
+ // configuration. It is typically implemented as the result of a hash
+ // function calculated off the resource. This field is optional, not
+ // all IPAM backends make use of fingerprints.
+ Fingerprint string
+}
+
+// Instance is the representation of an instance, typically a VM, subject to
+// per-node IPAM logic
+//
+// +k8s:deepcopy-gen=false
+// +deepequal-gen=false
+type Instance struct {
+ // interfaces is a map of all interfaces attached to the instance
+ // indexed by the interface ID
+ Interfaces map[string]InterfaceRevision
+}
+
+// InstanceMap is the list of all instances indexed by instance ID
+//
+// +k8s:deepcopy-gen=false
+// +deepequal-gen=false
+type InstanceMap struct {
+ mutex lock.RWMutex
+ data map[string]*Instance
+}
+
+// NewInstanceMap returns a new InstanceMap
+func NewInstanceMap() *InstanceMap {
+ return &InstanceMap{data: map[string]*Instance{}}
+}
+
+// UpdateInstance updates the interfaces map for a particular instance.
+func (m *InstanceMap) UpdateInstance(instanceID string, instance *Instance) {
+ m.mutex.Lock()
+ m.data[instanceID] = instance
+ m.mutex.Unlock()
+}
+
+// Update updates the definition of an interface for a particular instance. If
+// the interface is already known, the definition is updated, otherwise the
+// interface is added to the instance.
+func (m *InstanceMap) Update(instanceID string, iface InterfaceRevision) {
+ m.mutex.Lock()
+ m.updateLocked(instanceID, iface)
+ m.mutex.Unlock()
+}
+
+func (m *InstanceMap) updateLocked(instanceID string, iface InterfaceRevision) {
+ if iface.Resource == nil {
+ return
+ }
+
+ i, ok := m.data[instanceID]
+ if !ok {
+ i = &Instance{}
+ m.data[instanceID] = i
+ }
+
+ if i.Interfaces == nil {
+ i.Interfaces = map[string]InterfaceRevision{}
+ }
+
+ i.Interfaces[iface.Resource.InterfaceID()] = iface
+}
+
+type Address interface{}
+
+// AddressIterator is the function called by the ForeachAddress iterator
+type AddressIterator func(instanceID, interfaceID, ip, poolID string, address Address) error
+
+func foreachAddress(instanceID string, instance *Instance, fn AddressIterator) error {
+ for _, rev := range instance.Interfaces {
+ if err := rev.Resource.ForeachAddress(instanceID, fn); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ForeachAddress calls fn for each address on each interface attached to each
+// instance. If an instanceID is specified, the only the interfaces and
+// addresses of the specified instance are considered.
+//
+// The InstanceMap is read-locked throughout the iteration process, i.e., no
+// updates will occur. However, the address object given to the AddressIterator
+// will point to live data and must be deep copied if used outside of the
+// context of the iterator function.
+func (m *InstanceMap) ForeachAddress(instanceID string, fn AddressIterator) error {
+ m.mutex.RLock()
+ defer m.mutex.RUnlock()
+
+ if instanceID != "" {
+ if instance := m.data[instanceID]; instance != nil {
+ return foreachAddress(instanceID, instance, fn)
+ }
+ return fmt.Errorf("instance does not exist: %q", instanceID)
+ }
+
+ for instanceID, instance := range m.data {
+ if err := foreachAddress(instanceID, instance, fn); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// InterfaceIterator is the function called by the ForeachInterface iterator
+type InterfaceIterator func(instanceID, interfaceID string, iface InterfaceRevision) error
+
+func foreachInterface(instanceID string, instance *Instance, fn InterfaceIterator) error {
+ for _, rev := range instance.Interfaces {
+ if err := fn(instanceID, rev.Resource.InterfaceID(), rev); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ForeachInterface calls fn for each interface on each interface attached to
+// each instance. If an instanceID is specified, the only the interfaces and
+// addresses of the specified instance are considered.
+//
+// The InstanceMap is read-locked throughout the iteration process, i.e., no
+// updates will occur. However, the address object given to the InterfaceIterator
+// will point to live data and must be deep copied if used outside of the
+// context of the iterator function.
+func (m *InstanceMap) ForeachInterface(instanceID string, fn InterfaceIterator) error {
+ m.mutex.RLock()
+ defer m.mutex.RUnlock()
+
+ if instanceID != "" {
+ if instance := m.data[instanceID]; instance != nil {
+ return foreachInterface(instanceID, instance, fn)
+ }
+ return fmt.Errorf("instance does not exist: %q", instanceID)
+ }
+ for instanceID, instance := range m.data {
+ if err := foreachInterface(instanceID, instance, fn); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// GetInterface returns returns a particular interface of an instance. The
+// boolean indicates whether the interface was found or not.
+func (m *InstanceMap) GetInterface(instanceID, interfaceID string) (InterfaceRevision, bool) {
+ m.mutex.RLock()
+ defer m.mutex.RUnlock()
+
+ if instance := m.data[instanceID]; instance != nil {
+ if rev, ok := instance.Interfaces[interfaceID]; ok {
+ return rev, true
+ }
+ }
+
+ return InterfaceRevision{}, false
+}
+
+// DeepCopy returns a deep copy
+func (m *InstanceMap) DeepCopy() *InstanceMap {
+ c := NewInstanceMap()
+ m.ForeachInterface("", func(instanceID, interfaceID string, rev InterfaceRevision) error {
+ // c is not exposed yet, we can access it without locking it
+ c.updateLocked(instanceID, rev)
+ return nil
+ })
+ return c
+}
+
+// NumInstances returns the number of instances in the instance map
+func (m *InstanceMap) NumInstances() (size int) {
+ m.mutex.RLock()
+ size = len(m.data)
+ m.mutex.RUnlock()
+ return
+}
+
+// Exists returns whether the instance ID is in the instanceMap
+func (m *InstanceMap) Exists(instanceID string) (exists bool) {
+ m.mutex.RLock()
+ defer m.mutex.RUnlock()
+ if instance := m.data[instanceID]; instance != nil {
+ return true
+ }
+ return false
+}
+
+// Delete instance from m.data
+func (m *InstanceMap) Delete(instanceID string) {
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+ delete(m.data, instanceID)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/ipam/types/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/ipam/types/zz_generated.deepcopy.go
new file mode 100644
index 000000000..dc090499f
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/ipam/types/zz_generated.deepcopy.go
@@ -0,0 +1,434 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package types
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AllocationIP) DeepCopyInto(out *AllocationIP) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationIP.
+func (in *AllocationIP) DeepCopy() *AllocationIP {
+ if in == nil {
+ return nil
+ }
+ out := new(AllocationIP)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in AllocationMap) DeepCopyInto(out *AllocationMap) {
+ {
+ in := &in
+ *out = make(AllocationMap, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationMap.
+func (in AllocationMap) DeepCopy() AllocationMap {
+ if in == nil {
+ return nil
+ }
+ out := new(AllocationMap)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPAMPoolAllocation) DeepCopyInto(out *IPAMPoolAllocation) {
+ *out = *in
+ if in.CIDRs != nil {
+ in, out := &in.CIDRs, &out.CIDRs
+ *out = make([]IPAMPodCIDR, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMPoolAllocation.
+func (in *IPAMPoolAllocation) DeepCopy() *IPAMPoolAllocation {
+ if in == nil {
+ return nil
+ }
+ out := new(IPAMPoolAllocation)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPAMPoolDemand) DeepCopyInto(out *IPAMPoolDemand) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMPoolDemand.
+func (in *IPAMPoolDemand) DeepCopy() *IPAMPoolDemand {
+ if in == nil {
+ return nil
+ }
+ out := new(IPAMPoolDemand)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPAMPoolRequest) DeepCopyInto(out *IPAMPoolRequest) {
+ *out = *in
+ out.Needed = in.Needed
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMPoolRequest.
+func (in *IPAMPoolRequest) DeepCopy() *IPAMPoolRequest {
+ if in == nil {
+ return nil
+ }
+ out := new(IPAMPoolRequest)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPAMPoolSpec) DeepCopyInto(out *IPAMPoolSpec) {
+ *out = *in
+ if in.Requested != nil {
+ in, out := &in.Requested, &out.Requested
+ *out = make([]IPAMPoolRequest, len(*in))
+ copy(*out, *in)
+ }
+ if in.Allocated != nil {
+ in, out := &in.Allocated, &out.Allocated
+ *out = make([]IPAMPoolAllocation, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMPoolSpec.
+func (in *IPAMPoolSpec) DeepCopy() *IPAMPoolSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(IPAMPoolSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPAMSpec) DeepCopyInto(out *IPAMSpec) {
+ *out = *in
+ if in.Pool != nil {
+ in, out := &in.Pool, &out.Pool
+ *out = make(AllocationMap, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ in.Pools.DeepCopyInto(&out.Pools)
+ if in.PodCIDRs != nil {
+ in, out := &in.PodCIDRs, &out.PodCIDRs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMSpec.
+func (in *IPAMSpec) DeepCopy() *IPAMSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(IPAMSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPAMStatus) DeepCopyInto(out *IPAMStatus) {
+ *out = *in
+ if in.Used != nil {
+ in, out := &in.Used, &out.Used
+ *out = make(AllocationMap, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.PodCIDRs != nil {
+ in, out := &in.PodCIDRs, &out.PodCIDRs
+ *out = make(PodCIDRMap, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ out.OperatorStatus = in.OperatorStatus
+ if in.ReleaseIPs != nil {
+ in, out := &in.ReleaseIPs, &out.ReleaseIPs
+ *out = make(map[string]IPReleaseStatus, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMStatus.
+func (in *IPAMStatus) DeepCopy() *IPAMStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(IPAMStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Limits) DeepCopyInto(out *Limits) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Limits.
+func (in *Limits) DeepCopy() *Limits {
+ if in == nil {
+ return nil
+ }
+ out := new(Limits)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorStatus) DeepCopyInto(out *OperatorStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorStatus.
+func (in *OperatorStatus) DeepCopy() *OperatorStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(OperatorStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in PodCIDRMap) DeepCopyInto(out *PodCIDRMap) {
+ {
+ in := &in
+ *out = make(PodCIDRMap, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCIDRMap.
+func (in PodCIDRMap) DeepCopy() PodCIDRMap {
+ if in == nil {
+ return nil
+ }
+ out := new(PodCIDRMap)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PodCIDRMapEntry) DeepCopyInto(out *PodCIDRMapEntry) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCIDRMapEntry.
+func (in *PodCIDRMapEntry) DeepCopy() *PodCIDRMapEntry {
+ if in == nil {
+ return nil
+ }
+ out := new(PodCIDRMapEntry)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PoolQuota) DeepCopyInto(out *PoolQuota) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PoolQuota.
+func (in *PoolQuota) DeepCopy() *PoolQuota {
+ if in == nil {
+ return nil
+ }
+ out := new(PoolQuota)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in PoolQuotaMap) DeepCopyInto(out *PoolQuotaMap) {
+ {
+ in := &in
+ *out = make(PoolQuotaMap, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PoolQuotaMap.
+func (in PoolQuotaMap) DeepCopy() PoolQuotaMap {
+ if in == nil {
+ return nil
+ }
+ out := new(PoolQuotaMap)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Subnet) DeepCopyInto(out *Subnet) {
+ *out = *in
+ if in.CIDR != nil {
+ in, out := &in.CIDR, &out.CIDR
+ *out = (*in).DeepCopy()
+ }
+ if in.Tags != nil {
+ in, out := &in.Tags, &out.Tags
+ *out = make(Tags, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subnet.
+func (in *Subnet) DeepCopy() *Subnet {
+ if in == nil {
+ return nil
+ }
+ out := new(Subnet)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in SubnetMap) DeepCopyInto(out *SubnetMap) {
+ {
+ in := &in
+ *out = make(SubnetMap, len(*in))
+ for key, val := range *in {
+ var outVal *Subnet
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ in, out := &val, &outVal
+ *out = new(Subnet)
+ (*in).DeepCopyInto(*out)
+ }
+ (*out)[key] = outVal
+ }
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetMap.
+func (in SubnetMap) DeepCopy() SubnetMap {
+ if in == nil {
+ return nil
+ }
+ out := new(SubnetMap)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in Tags) DeepCopyInto(out *Tags) {
+ {
+ in := &in
+ *out = make(Tags, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Tags.
+func (in Tags) DeepCopy() Tags {
+ if in == nil {
+ return nil
+ }
+ out := new(Tags)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VirtualNetwork) DeepCopyInto(out *VirtualNetwork) {
+ *out = *in
+ if in.CIDRs != nil {
+ in, out := &in.CIDRs, &out.CIDRs
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetwork.
+func (in *VirtualNetwork) DeepCopy() *VirtualNetwork {
+ if in == nil {
+ return nil
+ }
+ out := new(VirtualNetwork)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in VirtualNetworkMap) DeepCopyInto(out *VirtualNetworkMap) {
+ {
+ in := &in
+ *out = make(VirtualNetworkMap, len(*in))
+ for key, val := range *in {
+ var outVal *VirtualNetwork
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ in, out := &val, &outVal
+ *out = new(VirtualNetwork)
+ (*in).DeepCopyInto(*out)
+ }
+ (*out)[key] = outVal
+ }
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNetworkMap.
+func (in VirtualNetworkMap) DeepCopy() VirtualNetworkMap {
+ if in == nil {
+ return nil
+ }
+ out := new(VirtualNetworkMap)
+ in.DeepCopyInto(out)
+ return *out
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/ipam/types/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/ipam/types/zz_generated.deepequal.go
new file mode 100644
index 000000000..9749444d6
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/ipam/types/zz_generated.deepequal.go
@@ -0,0 +1,519 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by deepequal-gen. DO NOT EDIT.
+
+package types
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *AllocationIP) DeepEqual(other *AllocationIP) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Owner != other.Owner {
+ return false
+ }
+ if in.Resource != other.Resource {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *AllocationMap) DeepEqual(other *AllocationMap) bool {
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for key, inValue := range *in {
+ if otherValue, present := (*other)[key]; !present {
+ return false
+ } else {
+ if !inValue.DeepEqual(&otherValue) {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *IPAMPoolAllocation) DeepEqual(other *IPAMPoolAllocation) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Pool != other.Pool {
+ return false
+ }
+ if ((in.CIDRs != nil) && (other.CIDRs != nil)) || ((in.CIDRs == nil) != (other.CIDRs == nil)) {
+ in, other := &in.CIDRs, &other.CIDRs
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if inElement != (*other)[i] {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *IPAMPoolDemand) DeepEqual(other *IPAMPoolDemand) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.IPv4Addrs != other.IPv4Addrs {
+ return false
+ }
+ if in.IPv6Addrs != other.IPv6Addrs {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *IPAMPoolRequest) DeepEqual(other *IPAMPoolRequest) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Pool != other.Pool {
+ return false
+ }
+ if in.Needed != other.Needed {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *IPAMPoolSpec) DeepEqual(other *IPAMPoolSpec) bool {
+ if other == nil {
+ return false
+ }
+
+ if ((in.Requested != nil) && (other.Requested != nil)) || ((in.Requested == nil) != (other.Requested == nil)) {
+ in, other := &in.Requested, &other.Requested
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ if ((in.Allocated != nil) && (other.Allocated != nil)) || ((in.Allocated == nil) != (other.Allocated == nil)) {
+ in, other := &in.Allocated, &other.Allocated
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *IPAMSpec) DeepEqual(other *IPAMSpec) bool {
+ if other == nil {
+ return false
+ }
+
+ if ((in.Pool != nil) && (other.Pool != nil)) || ((in.Pool == nil) != (other.Pool == nil)) {
+ in, other := &in.Pool, &other.Pool
+ if other == nil || !in.DeepEqual(other) {
+ return false
+ }
+ }
+
+ if !in.Pools.DeepEqual(&other.Pools) {
+ return false
+ }
+
+ if ((in.PodCIDRs != nil) && (other.PodCIDRs != nil)) || ((in.PodCIDRs == nil) != (other.PodCIDRs == nil)) {
+ in, other := &in.PodCIDRs, &other.PodCIDRs
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if inElement != (*other)[i] {
+ return false
+ }
+ }
+ }
+ }
+
+ if in.MinAllocate != other.MinAllocate {
+ return false
+ }
+ if in.MaxAllocate != other.MaxAllocate {
+ return false
+ }
+ if in.PreAllocate != other.PreAllocate {
+ return false
+ }
+ if in.MaxAboveWatermark != other.MaxAboveWatermark {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *IPAMStatus) DeepEqual(other *IPAMStatus) bool {
+ if other == nil {
+ return false
+ }
+
+ if ((in.Used != nil) && (other.Used != nil)) || ((in.Used == nil) != (other.Used == nil)) {
+ in, other := &in.Used, &other.Used
+ if other == nil || !in.DeepEqual(other) {
+ return false
+ }
+ }
+
+ if ((in.PodCIDRs != nil) && (other.PodCIDRs != nil)) || ((in.PodCIDRs == nil) != (other.PodCIDRs == nil)) {
+ in, other := &in.PodCIDRs, &other.PodCIDRs
+ if other == nil || !in.DeepEqual(other) {
+ return false
+ }
+ }
+
+ if in.OperatorStatus != other.OperatorStatus {
+ return false
+ }
+
+ if ((in.ReleaseIPs != nil) && (other.ReleaseIPs != nil)) || ((in.ReleaseIPs == nil) != (other.ReleaseIPs == nil)) {
+ in, other := &in.ReleaseIPs, &other.ReleaseIPs
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for key, inValue := range *in {
+ if otherValue, present := (*other)[key]; !present {
+ return false
+ } else {
+ if inValue != otherValue {
+ return false
+ }
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *Limits) DeepEqual(other *Limits) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Adapters != other.Adapters {
+ return false
+ }
+ if in.IPv4 != other.IPv4 {
+ return false
+ }
+ if in.IPv6 != other.IPv6 {
+ return false
+ }
+ if in.HypervisorType != other.HypervisorType {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *OperatorStatus) DeepEqual(other *OperatorStatus) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Error != other.Error {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *PodCIDRMap) DeepEqual(other *PodCIDRMap) bool {
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for key, inValue := range *in {
+ if otherValue, present := (*other)[key]; !present {
+ return false
+ } else {
+ if !inValue.DeepEqual(&otherValue) {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *PodCIDRMapEntry) DeepEqual(other *PodCIDRMapEntry) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Status != other.Status {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *PoolQuota) DeepEqual(other *PoolQuota) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.AvailabilityZone != other.AvailabilityZone {
+ return false
+ }
+ if in.AvailableIPs != other.AvailableIPs {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *PoolQuotaMap) DeepEqual(other *PoolQuotaMap) bool {
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for key, inValue := range *in {
+ if otherValue, present := (*other)[key]; !present {
+ return false
+ } else {
+ if !inValue.DeepEqual(&otherValue) {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *Subnet) DeepEqual(other *Subnet) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.ID != other.ID {
+ return false
+ }
+ if in.Name != other.Name {
+ return false
+ }
+ if (in.CIDR == nil) != (other.CIDR == nil) {
+ return false
+ } else if in.CIDR != nil {
+ if !in.CIDR.DeepEqual(other.CIDR) {
+ return false
+ }
+ }
+
+ if in.AvailabilityZone != other.AvailabilityZone {
+ return false
+ }
+ if in.VirtualNetworkID != other.VirtualNetworkID {
+ return false
+ }
+ if in.AvailableAddresses != other.AvailableAddresses {
+ return false
+ }
+ if ((in.Tags != nil) && (other.Tags != nil)) || ((in.Tags == nil) != (other.Tags == nil)) {
+ in, other := &in.Tags, &other.Tags
+ if other == nil || !in.DeepEqual(other) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *SubnetMap) DeepEqual(other *SubnetMap) bool {
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for key, inValue := range *in {
+ if otherValue, present := (*other)[key]; !present {
+ return false
+ } else {
+ if !inValue.DeepEqual(otherValue) {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *Tags) DeepEqual(other *Tags) bool {
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for key, inValue := range *in {
+ if otherValue, present := (*other)[key]; !present {
+ return false
+ } else {
+ if inValue != otherValue {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *VirtualNetwork) DeepEqual(other *VirtualNetwork) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.ID != other.ID {
+ return false
+ }
+ if in.PrimaryCIDR != other.PrimaryCIDR {
+ return false
+ }
+ if ((in.CIDRs != nil) && (other.CIDRs != nil)) || ((in.CIDRs == nil) != (other.CIDRs == nil)) {
+ in, other := &in.CIDRs, &other.CIDRs
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if inElement != (*other)[i] {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *VirtualNetworkMap) DeepEqual(other *VirtualNetworkMap) bool {
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for key, inValue := range *in {
+ if otherValue, present := (*other)[key]; !present {
+ return false
+ } else {
+ if !inValue.DeepEqual(otherValue) {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/ipcache/types/entries.go b/vendor/github.com/cilium/cilium/pkg/ipcache/types/entries.go
new file mode 100644
index 000000000..1377e25b1
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/ipcache/types/entries.go
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package types
+
+import (
+ "bytes"
+ "net"
+
+ "github.com/cilium/cilium/api/v1/models"
+)
+
+type IPListEntrySlice []*models.IPListEntry
+
+func (s IPListEntrySlice) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+// Less sorts the IPListEntry objects by CIDR prefix then IP address.
+// Given that the same IP cannot map to more than one identity, no further
+// sorting is performed.
+func (s IPListEntrySlice) Less(i, j int) bool {
+ _, iNet, _ := net.ParseCIDR(*s[i].Cidr)
+ _, jNet, _ := net.ParseCIDR(*s[j].Cidr)
+ iPrefixSize, _ := iNet.Mask.Size()
+ jPrefixSize, _ := jNet.Mask.Size()
+ if iPrefixSize == jPrefixSize {
+ return bytes.Compare(iNet.IP, jNet.IP) < 0
+ }
+ return iPrefixSize < jPrefixSize
+}
+
+func (s IPListEntrySlice) Len() int {
+ return len(s)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/ipcache/types/types.go b/vendor/github.com/cilium/cilium/pkg/ipcache/types/types.go
new file mode 100644
index 000000000..85537b589
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/ipcache/types/types.go
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package types
+
+import (
+ "context"
+ "net"
+ "net/netip"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/cilium/cilium/pkg/identity/cache"
+)
+
+// PolicyHandler is responsible for handling identity updates into the core
+// policy engine. See SelectorCache.UpdateIdentities() for more details.
+type PolicyHandler interface {
+ UpdateIdentities(added, deleted cache.IdentityCache, wg *sync.WaitGroup)
+}
+
+// DatapathHandler is responsible for ensuring that policy updates in the
+// core policy engine are pushed into the underlying BPF policy maps, to ensure
+// that the policies are actively being enforced in the datapath for any new
+// identities that have been updated using 'PolicyHandler'.
+//
+// Wait on the returned sync.WaitGroup to ensure that the operation is complete
+// before updating the datapath's IPCache maps.
+type DatapathHandler interface {
+ UpdatePolicyMaps(context.Context, *sync.WaitGroup) *sync.WaitGroup
+}
+
+// ResourceID identifies a unique copy of a resource that provides a source for
+// information tied to an IP address in the IPCache.
+type ResourceID string
+
+// ResourceKind determines the source of the ResourceID. Typically this is the
+// short name for the k8s resource.
+type ResourceKind string
+
+var (
+ ResourceKindCNP = ResourceKind("cnp")
+ ResourceKindCCNP = ResourceKind("ccnp")
+ ResourceKindDaemon = ResourceKind("daemon")
+ ResourceKindEndpoint = ResourceKind("ep")
+ ResourceKindNetpol = ResourceKind("netpol")
+ ResourceKindNode = ResourceKind("node")
+)
+
+// NewResourceID returns a ResourceID populated with the standard fields for
+// uniquely identifying a source of IPCache information.
+func NewResourceID(kind ResourceKind, namespace, name string) ResourceID {
+ str := strings.Builder{}
+ str.Grow(len(kind) + 1 + len(namespace) + 1 + len(name))
+ str.WriteString(string(kind))
+ str.WriteRune('/')
+ str.WriteString(namespace)
+ str.WriteRune('/')
+ str.WriteString(name)
+ return ResourceID(str.String())
+}
+
+// TunnelPeer is the IP address of the host associated with this prefix. This is
+// typically used to establish a tunnel, e.g. in tunnel mode or for encryption.
+// This type implements ipcache.IPMetadata
+type TunnelPeer struct{ netip.Addr }
+
+func (t TunnelPeer) IP() net.IP {
+ return t.AsSlice()
+}
+
+// EncryptKey is the identity of the encryption key.
+// This type implements ipcache.IPMetadata
+type EncryptKey uint8
+
+const EncryptKeyEmpty = EncryptKey(0)
+
+func (e EncryptKey) IsValid() bool {
+ return e != EncryptKeyEmpty
+}
+
+func (e EncryptKey) Uint8() uint8 {
+ return uint8(e)
+}
+
+func (e EncryptKey) String() string {
+ return strconv.Itoa(int(e))
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/const.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/const.go
index aa9c0318b..80f8d84db 100644
--- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/const.go
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/const.go
@@ -61,6 +61,16 @@ const (
// documentation add the label for every resource object.
AppKubernetes = "app.kubernetes.io"
+ // StatefulSetPodNameLabel is the label name which, in-tree, is used to
+ // automatically label Pods that are owned by StatefulSets with their name,
+ // so that one can attach a Service to a specific Pod in the StatefulSet.
+ StatefulSetPodNameLabel = "statefulset.kubernetes.io/pod-name"
+
+ // StatefulSetPodIndexLabel is the label name which, in-tree, is used to
+ // automatically label Pods that are owned by StatefulSets with their
+ // ordinal index.
+ StatefulSetPodIndexLabel = "apps.kubernetes.io/pod-index"
+
// CtrlPrefixPolicyStatus is the prefix used for the controllers set up
// to sync the CNP with kube-apiserver.
CtrlPrefixPolicyStatus = "sync-cnp-policy-status"
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/register.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/register.go
index cfc12d39a..7585346d1 100644
--- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/register.go
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/register.go
@@ -15,5 +15,5 @@ const (
//
// Maintainers: Run ./Documentation/check-crd-compat-table.sh for each release
// Developers: Bump patch for each change in the CRD schema.
- CustomResourceDefinitionSchemaVersion = "1.26.9"
+ CustomResourceDefinitionSchemaVersion = "1.26.10"
)
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/utils/utils.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/utils/utils.go
new file mode 100644
index 000000000..28cb2e19e
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/utils/utils.go
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package utils
+
+import (
+ "github.com/sirupsen/logrus"
+ "k8s.io/apimachinery/pkg/types"
+
+ k8sConst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
+ slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
+ "github.com/cilium/cilium/pkg/labels"
+ "github.com/cilium/cilium/pkg/logging"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+ "github.com/cilium/cilium/pkg/policy/api"
+)
+
+const (
+ // subsysK8s is the value for logfields.LogSubsys
+ subsysK8s = "k8s"
+	// podPrefixLbl is the value of the prefix used in the label selector to
+	// represent pods in the default namespace.
+ podPrefixLbl = labels.LabelSourceK8sKeyPrefix + k8sConst.PodNamespaceLabel
+
+ // podAnyPrefixLbl is the value of the prefix used in the label selector to
+ // represent pods in the default namespace for any source type.
+ podAnyPrefixLbl = labels.LabelSourceAnyKeyPrefix + k8sConst.PodNamespaceLabel
+
+ // podInitLbl is the label used in a label selector to match on
+ // initializing pods.
+ podInitLbl = labels.LabelSourceReservedKeyPrefix + labels.IDNameInit
+
+ // ResourceTypeCiliumNetworkPolicy is the resource type used for the
+ // PolicyLabelDerivedFrom label
+ ResourceTypeCiliumNetworkPolicy = "CiliumNetworkPolicy"
+
+ // ResourceTypeCiliumClusterwideNetworkPolicy is the resource type used for the
+ // PolicyLabelDerivedFrom label
+ ResourceTypeCiliumClusterwideNetworkPolicy = "CiliumClusterwideNetworkPolicy"
+)
+
+var (
+ // log is the k8s package logger object.
+ log = logging.DefaultLogger.WithField(logfields.LogSubsys, subsysK8s)
+)
+
+// GetPolicyLabels returns a LabelArray for the given namespace and name.
+func GetPolicyLabels(ns, name string, uid types.UID, derivedFrom string) labels.LabelArray {
+ // Keep labels sorted by the key.
+ labelsArr := labels.LabelArray{
+ labels.NewLabel(k8sConst.PolicyLabelDerivedFrom, derivedFrom, labels.LabelSourceK8s),
+ labels.NewLabel(k8sConst.PolicyLabelName, name, labels.LabelSourceK8s),
+ }
+
+ // For clusterwide policy namespace will be empty.
+ if ns != "" {
+ nsLabel := labels.NewLabel(k8sConst.PolicyLabelNamespace, ns, labels.LabelSourceK8s)
+ labelsArr = append(labelsArr, nsLabel)
+ }
+
+ srcLabel := labels.NewLabel(k8sConst.PolicyLabelUID, string(uid), labels.LabelSourceK8s)
+ return append(labelsArr, srcLabel)
+}
+
+// getEndpointSelector converts the provided labelSelector into an EndpointSelector,
+// adding the relevant matches for namespaces based on the provided options.
+// If no namespace is provided then it is assumed that the selector is global to the cluster
+// this is when translating selectors for CiliumClusterwideNetworkPolicy.
+func getEndpointSelector(namespace string, labelSelector *slim_metav1.LabelSelector, addK8sPrefix, matchesInit bool) api.EndpointSelector {
+ es := api.NewESFromK8sLabelSelector("", labelSelector)
+
+ // The k8s prefix must not be added to reserved labels.
+ if addK8sPrefix && es.HasKeyPrefix(labels.LabelSourceReservedKeyPrefix) {
+ return es
+ }
+
+ // The user can explicitly specify the namespace in the
+ // FromEndpoints selector. If omitted, we limit the
+ // scope to the namespace the policy lives in.
+ //
+ // Policies applying on initializing pods are a special case.
+ // Those pods don't have any labels, so they don't have a namespace label either.
+ // Don't add a namespace label to those endpoint selectors, or we wouldn't be
+ // able to match on those pods.
+ if !es.HasKey(podPrefixLbl) && !es.HasKey(podAnyPrefixLbl) {
+ if namespace == "" {
+ // For a clusterwide policy if a namespace is not specified in the labels we add
+ // a selector to only match endpoints that contains a namespace label.
+ // This is to make sure that we are only allowing traffic for cilium managed k8s endpoints
+ // and even if a wildcard is provided in the selector we don't proceed with a truly
+ // empty(allow all) endpoint selector for the policy.
+ if !matchesInit {
+ es.AddMatchExpression(podPrefixLbl, slim_metav1.LabelSelectorOpExists, []string{})
+ }
+ } else {
+ es.AddMatch(podPrefixLbl, namespace)
+ }
+ }
+
+ return es
+}
+
+func parseToCiliumIngressCommonRule(namespace string, es api.EndpointSelector, ing api.IngressCommonRule) api.IngressCommonRule {
+ matchesInit := matchesPodInit(es)
+ var retRule api.IngressCommonRule
+
+ if ing.FromEndpoints != nil {
+ retRule.FromEndpoints = make([]api.EndpointSelector, len(ing.FromEndpoints))
+ for j, ep := range ing.FromEndpoints {
+ retRule.FromEndpoints[j] = getEndpointSelector(namespace, ep.LabelSelector, true, matchesInit)
+ }
+ }
+
+ if ing.FromCIDR != nil {
+ retRule.FromCIDR = make([]api.CIDR, len(ing.FromCIDR))
+ copy(retRule.FromCIDR, ing.FromCIDR)
+ }
+
+ if ing.FromCIDRSet != nil {
+ retRule.FromCIDRSet = make([]api.CIDRRule, len(ing.FromCIDRSet))
+ copy(retRule.FromCIDRSet, ing.FromCIDRSet)
+ }
+
+ if ing.FromRequires != nil {
+ retRule.FromRequires = make([]api.EndpointSelector, len(ing.FromRequires))
+ for j, ep := range ing.FromRequires {
+ retRule.FromRequires[j] = getEndpointSelector(namespace, ep.LabelSelector, false, matchesInit)
+ }
+ }
+
+ if ing.FromEntities != nil {
+ retRule.FromEntities = make([]api.Entity, len(ing.FromEntities))
+ copy(retRule.FromEntities, ing.FromEntities)
+ }
+
+ return retRule
+}
+
+func parseToCiliumIngressRule(namespace string, es api.EndpointSelector, inRules []api.IngressRule) []api.IngressRule {
+ var retRules []api.IngressRule
+
+ if inRules != nil {
+ retRules = make([]api.IngressRule, len(inRules))
+ for i, ing := range inRules {
+ if ing.ToPorts != nil {
+ retRules[i].ToPorts = make([]api.PortRule, len(ing.ToPorts))
+ copy(retRules[i].ToPorts, ing.ToPorts)
+ }
+ if ing.ICMPs != nil {
+ retRules[i].ICMPs = make(api.ICMPRules, len(ing.ICMPs))
+ copy(retRules[i].ICMPs, ing.ICMPs)
+ }
+ retRules[i].IngressCommonRule = parseToCiliumIngressCommonRule(namespace, es, ing.IngressCommonRule)
+ retRules[i].Authentication = ing.Authentication.DeepCopy()
+ retRules[i].SetAggregatedSelectors()
+ }
+ }
+ return retRules
+}
+
+func parseToCiliumIngressDenyRule(namespace string, es api.EndpointSelector, inRules []api.IngressDenyRule) []api.IngressDenyRule {
+ var retRules []api.IngressDenyRule
+
+ if inRules != nil {
+ retRules = make([]api.IngressDenyRule, len(inRules))
+ for i, ing := range inRules {
+ if ing.ToPorts != nil {
+ retRules[i].ToPorts = make([]api.PortDenyRule, len(ing.ToPorts))
+ copy(retRules[i].ToPorts, ing.ToPorts)
+ }
+ if ing.ICMPs != nil {
+ retRules[i].ICMPs = make(api.ICMPRules, len(ing.ICMPs))
+ copy(retRules[i].ICMPs, ing.ICMPs)
+ }
+ retRules[i].IngressCommonRule = parseToCiliumIngressCommonRule(namespace, es, ing.IngressCommonRule)
+ retRules[i].SetAggregatedSelectors()
+ }
+ }
+ return retRules
+}
+
+func parseToCiliumEgressCommonRule(namespace string, es api.EndpointSelector, egr api.EgressCommonRule) api.EgressCommonRule {
+ matchesInit := matchesPodInit(es)
+ var retRule api.EgressCommonRule
+ if egr.ToEndpoints != nil {
+ retRule.ToEndpoints = make([]api.EndpointSelector, len(egr.ToEndpoints))
+ for j, ep := range egr.ToEndpoints {
+ retRule.ToEndpoints[j] = getEndpointSelector(namespace, ep.LabelSelector, true, matchesInit)
+ }
+ }
+
+ if egr.ToCIDR != nil {
+ retRule.ToCIDR = make([]api.CIDR, len(egr.ToCIDR))
+ copy(retRule.ToCIDR, egr.ToCIDR)
+ }
+
+ if egr.ToCIDRSet != nil {
+ retRule.ToCIDRSet = make(api.CIDRRuleSlice, len(egr.ToCIDRSet))
+ copy(retRule.ToCIDRSet, egr.ToCIDRSet)
+ }
+
+ if egr.ToRequires != nil {
+ retRule.ToRequires = make([]api.EndpointSelector, len(egr.ToRequires))
+ for j, ep := range egr.ToRequires {
+ retRule.ToRequires[j] = getEndpointSelector(namespace, ep.LabelSelector, false, matchesInit)
+ }
+ }
+
+ if egr.ToServices != nil {
+ retRule.ToServices = make([]api.Service, len(egr.ToServices))
+ copy(retRule.ToServices, egr.ToServices)
+ }
+
+ if egr.ToEntities != nil {
+ retRule.ToEntities = make([]api.Entity, len(egr.ToEntities))
+ copy(retRule.ToEntities, egr.ToEntities)
+ }
+
+ if egr.ToGroups != nil {
+ retRule.ToGroups = make([]api.ToGroups, len(egr.ToGroups))
+ copy(retRule.ToGroups, egr.ToGroups)
+ }
+
+ return retRule
+}
+
+func parseToCiliumEgressRule(namespace string, es api.EndpointSelector, inRules []api.EgressRule) []api.EgressRule {
+ var retRules []api.EgressRule
+
+ if inRules != nil {
+ retRules = make([]api.EgressRule, len(inRules))
+ for i, egr := range inRules {
+ if egr.ToPorts != nil {
+ retRules[i].ToPorts = make([]api.PortRule, len(egr.ToPorts))
+ copy(retRules[i].ToPorts, egr.ToPorts)
+ }
+
+ if egr.ICMPs != nil {
+ retRules[i].ICMPs = make(api.ICMPRules, len(egr.ICMPs))
+ copy(retRules[i].ICMPs, egr.ICMPs)
+ }
+
+ if egr.ToFQDNs != nil {
+ retRules[i].ToFQDNs = make([]api.FQDNSelector, len(egr.ToFQDNs))
+ copy(retRules[i].ToFQDNs, egr.ToFQDNs)
+ }
+
+ retRules[i].EgressCommonRule = parseToCiliumEgressCommonRule(namespace, es, egr.EgressCommonRule)
+ retRules[i].Authentication = egr.Authentication
+ retRules[i].SetAggregatedSelectors()
+ }
+ }
+ return retRules
+}
+
+func parseToCiliumEgressDenyRule(namespace string, es api.EndpointSelector, inRules []api.EgressDenyRule) []api.EgressDenyRule {
+ var retRules []api.EgressDenyRule
+
+ if inRules != nil {
+ retRules = make([]api.EgressDenyRule, len(inRules))
+ for i, egr := range inRules {
+ if egr.ToPorts != nil {
+ retRules[i].ToPorts = make([]api.PortDenyRule, len(egr.ToPorts))
+ copy(retRules[i].ToPorts, egr.ToPorts)
+ }
+
+ if egr.ICMPs != nil {
+ retRules[i].ICMPs = make(api.ICMPRules, len(egr.ICMPs))
+ copy(retRules[i].ICMPs, egr.ICMPs)
+ }
+
+ retRules[i].EgressCommonRule = parseToCiliumEgressCommonRule(namespace, es, egr.EgressCommonRule)
+ retRules[i].SetAggregatedSelectors()
+ }
+ }
+ return retRules
+}
+
+func matchesPodInit(epSelector api.EndpointSelector) bool {
+ if epSelector.LabelSelector == nil {
+ return false
+ }
+ return epSelector.HasKey(podInitLbl)
+}
+
+// namespacesAreValid checks the set of namespaces from a rule and returns true if
+// they are not specified, or if they are specified and match the namespace
+// where the rule is being inserted.
+func namespacesAreValid(namespace string, userNamespaces []string) bool {
+ return len(userNamespaces) == 0 ||
+ (len(userNamespaces) == 1 && userNamespaces[0] == namespace)
+}
+
+// ParseToCiliumRule returns an api.Rule with all the labels parsed into cilium
+// labels. If the namespace provided is empty then the rule is cluster scoped, this
+// might happen in case of CiliumClusterwideNetworkPolicy which enforces a policy on the cluster
+// instead of the particular namespace.
+func ParseToCiliumRule(namespace, name string, uid types.UID, r *api.Rule) *api.Rule {
+ retRule := &api.Rule{}
+ if r.EndpointSelector.LabelSelector != nil {
+ retRule.EndpointSelector = api.NewESFromK8sLabelSelector("", r.EndpointSelector.LabelSelector)
+ // The PodSelector should only reflect to the same namespace
+ // the policy is being stored, thus we add the namespace to
+ // the MatchLabels map.
+ //
+ // Policies applying to all namespaces are a special case.
+ // Such policies can match on any traffic from Pods or Nodes,
+ // so it wouldn't make sense to inject a namespace match for
+ // those policies.
+ if namespace != "" {
+ userNamespace, present := r.EndpointSelector.GetMatch(podPrefixLbl)
+ if present && !namespacesAreValid(namespace, userNamespace) {
+ log.WithFields(logrus.Fields{
+ logfields.K8sNamespace: namespace,
+ logfields.CiliumNetworkPolicyName: name,
+ logfields.K8sNamespace + ".illegal": userNamespace,
+ }).Warn("CiliumNetworkPolicy contains illegal namespace match in EndpointSelector." +
+ " EndpointSelector always applies in namespace of the policy resource, removing illegal namespace match'.")
+ }
+ retRule.EndpointSelector.AddMatch(podPrefixLbl, namespace)
+ }
+ } else if r.NodeSelector.LabelSelector != nil {
+ retRule.NodeSelector = api.NewESFromK8sLabelSelector("", r.NodeSelector.LabelSelector)
+ }
+
+ retRule.Ingress = parseToCiliumIngressRule(namespace, r.EndpointSelector, r.Ingress)
+ retRule.IngressDeny = parseToCiliumIngressDenyRule(namespace, r.EndpointSelector, r.IngressDeny)
+ retRule.Egress = parseToCiliumEgressRule(namespace, r.EndpointSelector, r.Egress)
+ retRule.EgressDeny = parseToCiliumEgressDenyRule(namespace, r.EndpointSelector, r.EgressDeny)
+
+ retRule.Labels = ParseToCiliumLabels(namespace, name, uid, r.Labels)
+
+ retRule.Description = r.Description
+
+ return retRule
+}
+
+// ParseToCiliumLabels returns all ruleLbls appended with a specific label that
+// represents the given namespace and name along with a label that specifies
+// these labels were derived from a CiliumNetworkPolicy.
+func ParseToCiliumLabels(namespace, name string, uid types.UID, ruleLbs labels.LabelArray) labels.LabelArray {
+ resourceType := ResourceTypeCiliumNetworkPolicy
+ if namespace == "" {
+ resourceType = ResourceTypeCiliumClusterwideNetworkPolicy
+ }
+
+ policyLbls := GetPolicyLabels(namespace, name, uid, resourceType)
+ return append(policyLbls, ruleLbs...).Sort()
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/ccec_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/ccec_types.go
new file mode 100644
index 000000000..48823bf8b
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/ccec_types.go
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package v2
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:resource:categories={cilium},singular="ciliumclusterwideenvoyconfig",path="ciliumclusterwideenvoyconfigs",scope="Cluster",shortName={ccec}
+// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",description="The age of the identity",name="Age",type=date
+// +kubebuilder:storageversion
+
+type CiliumClusterwideEnvoyConfig struct {
+ // +k8s:openapi-gen=false
+ // +deepequal-gen=false
+ metav1.TypeMeta `json:",inline"`
+ // +k8s:openapi-gen=false
+ // +deepequal-gen=false
+ metav1.ObjectMeta `json:"metadata"`
+
+ // +k8s:openapi-gen=false
+ Spec CiliumEnvoyConfigSpec `json:"spec,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +deepequal-gen=false
+
+// CiliumClusterwideEnvoyConfigList is a list of CiliumClusterwideEnvoyConfig objects.
+type CiliumClusterwideEnvoyConfigList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ // Items is a list of CiliumClusterwideEnvoyConfig.
+ Items []CiliumClusterwideEnvoyConfig `json:"items"`
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/ccnp_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/ccnp_types.go
new file mode 100644
index 000000000..14ff2c20c
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/ccnp_types.go
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package v2
+
+import (
+ "fmt"
+ "reflect"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ k8sCiliumUtils "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/utils"
+ "github.com/cilium/cilium/pkg/policy/api"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +deepequal-gen:private-method=true
+// +kubebuilder:resource:categories={cilium,ciliumpolicy},singular="ciliumclusterwidenetworkpolicy",path="ciliumclusterwidenetworkpolicies",scope="Cluster",shortName={ccnp}
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+
+// CiliumClusterwideNetworkPolicy is a Kubernetes third-party resource with an
+// modified version of CiliumNetworkPolicy which is cluster scoped rather than
+// namespace scoped.
+type CiliumClusterwideNetworkPolicy struct {
+ // +deepequal-gen=false
+ metav1.TypeMeta `json:",inline"`
+ // +deepequal-gen=false
+ metav1.ObjectMeta `json:"metadata"`
+
+ // Spec is the desired Cilium specific rule specification.
+ Spec *api.Rule `json:"spec,omitempty"`
+
+ // Specs is a list of desired Cilium specific rule specification.
+ Specs api.Rules `json:"specs,omitempty"`
+
+ // Status is the status of the Cilium policy rule.
+ //
+	// The reason this field exists in this structure is due to a bug in the k8s
+ // code-generator that doesn't create a `UpdateStatus` method because the
+ // field does not exist in the structure.
+ //
+ // +kubebuilder:validation:Optional
+ Status CiliumNetworkPolicyStatus `json:"status"`
+}
+
+// DeepEqual compares 2 CCNPs while ignoring the LastAppliedConfigAnnotation
+// and ignoring the Status field of the CCNP.
+func (in *CiliumClusterwideNetworkPolicy) DeepEqual(other *CiliumClusterwideNetworkPolicy) bool {
+ return objectMetaDeepEqual(in.ObjectMeta, other.ObjectMeta) && in.deepEqual(other)
+}
+
+// GetPolicyStatus returns the CiliumClusterwideNetworkPolicyNodeStatus corresponding to
+// nodeName in the provided CiliumClusterwideNetworkPolicy. If Nodes within the rule's
+// Status is nil, returns an empty CiliumClusterwideNetworkPolicyNodeStatus.
+func (r *CiliumClusterwideNetworkPolicy) GetPolicyStatus(nodeName string) CiliumNetworkPolicyNodeStatus {
+ if r.Status.Nodes == nil {
+ return CiliumNetworkPolicyNodeStatus{}
+ }
+ return r.Status.Nodes[nodeName]
+}
+
+// SetPolicyStatus sets the given policy status for the given nodes' map.
+func (r *CiliumClusterwideNetworkPolicy) SetPolicyStatus(nodeName string, cnpns CiliumNetworkPolicyNodeStatus) {
+ if r.Status.Nodes == nil {
+ r.Status.Nodes = map[string]CiliumNetworkPolicyNodeStatus{}
+ }
+ r.Status.Nodes[nodeName] = cnpns
+}
+
+// SetDerivedPolicyStatus set the derivative policy status for the given
+// derivative policy name.
+func (r *CiliumClusterwideNetworkPolicy) SetDerivedPolicyStatus(derivativePolicyName string, status CiliumNetworkPolicyNodeStatus) {
+ if r.Status.DerivativePolicies == nil {
+ r.Status.DerivativePolicies = map[string]CiliumNetworkPolicyNodeStatus{}
+ }
+ r.Status.DerivativePolicies[derivativePolicyName] = status
+}
+
+// AnnotationsEquals returns true if ObjectMeta.Annotations of each
+// CiliumClusterwideNetworkPolicy are equivalent (i.e., they contain equivalent key-value
+// pairs).
+func (r *CiliumClusterwideNetworkPolicy) AnnotationsEquals(o *CiliumClusterwideNetworkPolicy) bool {
+ if o == nil {
+ return r == nil
+ }
+ return reflect.DeepEqual(r.ObjectMeta.Annotations, o.ObjectMeta.Annotations)
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:openapi-gen=false
+// +deepequal-gen=false
+
+// CiliumClusterwideNetworkPolicyList is a list of
+// CiliumClusterwideNetworkPolicy objects.
+type CiliumClusterwideNetworkPolicyList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ // Items is a list of CiliumClusterwideNetworkPolicies.
+ Items []CiliumClusterwideNetworkPolicy `json:"items"`
+}
+
+// Parse parses a CiliumClusterwideNetworkPolicy and returns a list of cilium
+// policy rules.
+func (r *CiliumClusterwideNetworkPolicy) Parse() (api.Rules, error) {
+ if r.ObjectMeta.Name == "" {
+ return nil, NewErrParse("CiliumClusterwideNetworkPolicy must have name")
+ }
+
+ name := r.ObjectMeta.Name
+ uid := r.ObjectMeta.UID
+
+ retRules := api.Rules{}
+
+ if r.Spec == nil && r.Specs == nil {
+ return nil, ErrEmptyCCNP
+ }
+
+ if r.Spec != nil {
+ if err := r.Spec.Sanitize(); err != nil {
+ return nil, NewErrParse(fmt.Sprintf("Invalid CiliumClusterwideNetworkPolicy spec: %s", err))
+ }
+ cr := k8sCiliumUtils.ParseToCiliumRule("", name, uid, r.Spec)
+ retRules = append(retRules, cr)
+ }
+ if r.Specs != nil {
+ for _, rule := range r.Specs {
+ if err := rule.Sanitize(); err != nil {
+ return nil, NewErrParse(fmt.Sprintf("Invalid CiliumClusterwideNetworkPolicy specs: %s", err))
+
+ }
+ cr := k8sCiliumUtils.ParseToCiliumRule("", name, uid, rule)
+ retRules = append(retRules, cr)
+ }
+ }
+
+ return retRules, nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cec_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cec_types.go
new file mode 100644
index 000000000..ffff813a4
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cec_types.go
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package v2
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+
+ "google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/encoding/prototext"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/types/known/anypb"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "github.com/cilium/cilium/pkg/option"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:resource:categories={cilium},singular="ciliumenvoyconfig",path="ciliumenvoyconfigs",scope="Namespaced",shortName={cec}
+// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",description="The age of the identity",name="Age",type=date
+// +kubebuilder:storageversion
+
+type CiliumEnvoyConfig struct {
+ // +k8s:openapi-gen=false
+ // +deepequal-gen=false
+ metav1.TypeMeta `json:",inline"`
+ // +k8s:openapi-gen=false
+ // +deepequal-gen=false
+ metav1.ObjectMeta `json:"metadata"`
+
+ // +k8s:openapi-gen=false
+ Spec CiliumEnvoyConfigSpec `json:"spec,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +deepequal-gen=false
+
+// CiliumEnvoyConfigList is a list of CiliumEnvoyConfig objects.
+type CiliumEnvoyConfigList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ // Items is a list of CiliumEnvoyConfig.
+ Items []CiliumEnvoyConfig `json:"items"`
+}
+
+type CiliumEnvoyConfigSpec struct {
+ // Services specifies Kubernetes services for which traffic is
+ // forwarded to an Envoy listener for L7 load balancing. Backends
+	// of these services are automatically synced to Envoy using EDS.
+ //
+ // +kubebuilder:validation:Optional
+ Services []*ServiceListener `json:"services,omitempty"`
+
+ // BackendServices specifies Kubernetes services whose backends
+ // are automatically synced to Envoy using EDS. Traffic for these
+ // services is not forwarded to an Envoy listener. This allows an
+	// Envoy listener to load balance traffic to these backends while
+ // normal Cilium service load balancing takes care of balancing
+ // traffic for these services at the same time.
+ //
+ // +kubebuilder:validation:Optional
+ BackendServices []*Service `json:"backendServices,omitempty"`
+
+ // Envoy xDS resources, a list of the following Envoy resource types:
+ // type.googleapis.com/envoy.config.listener.v3.Listener,
+ // type.googleapis.com/envoy.config.route.v3.RouteConfiguration,
+ // type.googleapis.com/envoy.config.cluster.v3.Cluster,
+ // type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment, and
+ // type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.Secret.
+ //
+ // +kubebuilder:validation:Required
+ Resources []XDSResource `json:"resources,omitempty"`
+}
+
+type Service struct {
+ // Name is the name of a destination Kubernetes service that identifies traffic
+ // to be redirected.
+ //
+ // +kubebuilder:validation:Required
+ Name string `json:"name"`
+
+ // Namespace is the Kubernetes service namespace.
+ // In CiliumEnvoyConfig namespace defaults to the namespace of the CEC,
+ // In CiliumClusterwideEnvoyConfig namespace defaults to "default".
+ // +kubebuilder:validation:Optional
+ Namespace string `json:"namespace"`
+
+	// Ports is the list of port numbers, which can be used for filtering in
+	// case the underlying service exposes multiple port numbers.
+ //
+ // +kubebuilder:validation:Optional
+ Ports []string `json:"number,omitempty"`
+}
+
+type ServiceListener struct {
+ // Name is the name of a destination Kubernetes service that identifies traffic
+ // to be redirected.
+ //
+ // +kubebuilder:validation:Required
+ Name string `json:"name"`
+
+ // Namespace is the Kubernetes service namespace.
+ // In CiliumEnvoyConfig namespace this is overridden to the namespace of the CEC,
+ // In CiliumClusterwideEnvoyConfig namespace defaults to "default".
+ // +kubebuilder:validation:Optional
+ Namespace string `json:"namespace"`
+
+ // Listener specifies the name of the Envoy listener the
+ // service traffic is redirected to. The listener must be
+ // specified in the Envoy 'resources' of the same
+ // CiliumEnvoyConfig.
+ //
+ // If omitted, the first listener specified in 'resources' is
+ // used.
+ //
+ // +kubebuilder:validation:Optional
+ Listener string `json:"listener"`
+}
+
+// +kubebuilder:pruning:PreserveUnknownFields
+type XDSResource struct {
+ *anypb.Any `json:"-"`
+}
+
+// DeepCopyInto deep copies 'in' into 'out'.
+func (in *XDSResource) DeepCopyInto(out *XDSResource) {
+ out.Any, _ = proto.Clone(in.Any).(*anypb.Any)
+}
+
+// DeepEqual returns 'true' if 'a' and 'b' are equal.
+func (a *XDSResource) DeepEqual(b *XDSResource) bool {
+ return proto.Equal(a.Any, b.Any)
+}
+
+// MarshalJSON ensures that the unstructured object produces proper
+// JSON when passed to Go's standard JSON library.
+func (u *XDSResource) MarshalJSON() ([]byte, error) {
+ return protojson.Marshal(u.Any)
+}
+
+// UnmarshalJSON ensures that the unstructured object properly decodes
+// JSON when passed to Go's standard JSON library.
+func (u *XDSResource) UnmarshalJSON(b []byte) (err error) {
+ // xDS resources are not validated in K8s, recover from possible panics
+ defer func() {
+ if r := recover(); r != nil {
+ err = fmt.Errorf("CEC JSON decoding paniced: %v", r)
+ }
+ }()
+ u.Any = &anypb.Any{}
+ err = protojson.Unmarshal(b, u.Any)
+ if err != nil {
+ var buf bytes.Buffer
+ json.Indent(&buf, b, "", "\t")
+ log.Warningf("Ignoring invalid CiliumEnvoyConfig JSON (%s): %s",
+ err, buf.String())
+ } else if option.Config.Debug {
+ log.Debugf("CEC unmarshaled XDS Resource: %v", prototext.Format(u.Any))
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cegp_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cegp_types.go
new file mode 100644
index 000000000..9eb408a03
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cegp_types.go
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package v2
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ slimv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:resource:categories={cilium,ciliumpolicy},singular="ciliumegressgatewaypolicy",path="ciliumegressgatewaypolicies",scope="Cluster",shortName={cegp}
+// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name="Age",type=date
+// +kubebuilder:storageversion
+
+type CiliumEgressGatewayPolicy struct {
+ // +k8s:openapi-gen=false
+ // +deepequal-gen=false
+ metav1.TypeMeta `json:",inline"`
+ // +k8s:openapi-gen=false
+ // +deepequal-gen=false
+ metav1.ObjectMeta `json:"metadata"`
+
+ Spec CiliumEgressGatewayPolicySpec `json:"spec,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:openapi-gen=false
+// +deepequal-gen=false
+
+// CiliumEgressGatewayPolicyList is a list of CiliumEgressGatewayPolicy objects.
+type CiliumEgressGatewayPolicyList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ // Items is a list of CiliumEgressGatewayPolicy.
+ Items []CiliumEgressGatewayPolicy `json:"items"`
+}
+
+// +kubebuilder:validation:Pattern=`^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\/([0-9]|[1-2][0-9]|3[0-2])$`
+type IPv4CIDR string
+
+type CiliumEgressGatewayPolicySpec struct {
+ // Selectors is a list of rules by which egress traffic is
+ // filtered from the source pods.
+ Selectors []EgressRule `json:"selectors"`
+
+ // DestinationCIDRs is a list of destination CIDRs for destination IP addresses.
+ // If a destination IP matches any one CIDR, it will be selected.
+ DestinationCIDRs []IPv4CIDR `json:"destinationCIDRs"`
+
+ // ExcludedCIDRs is a list of destination CIDRs that will be excluded
+ // from the egress gateway redirection and SNAT logic.
+ // Should be a subset of destinationCIDRs otherwise it will not have any
+ // effect.
+ //
+ // +kubebuilder:validation:Optional
+ ExcludedCIDRs []IPv4CIDR `json:"excludedCIDRs"`
+
+ // EgressGateway is the gateway node responsible for SNATing traffic.
+ EgressGateway *EgressGateway `json:"egressGateway"`
+}
+
+// EgressGateway identifies the node that should act as egress gateway for a
+// given egress Gateway policy. In addition to that it also specifies the
+// configuration of said node (which egress IP or network interface should be
+// used to SNAT traffic).
+type EgressGateway struct {
+ // This is a label selector which selects the node that should act as
+ // egress gateway for the given policy.
+ // In case multiple nodes are selected, only the first one in the
+ // lexical ordering over the node names will be used.
+ // This field follows standard label selector semantics.
+ //
+ // +kubebuilder:validation:Required
+ NodeSelector *slimv1.LabelSelector `json:"nodeSelector"`
+
+ // Interface is the network interface to which the egress IP address
+ // that the traffic is SNATed with is assigned.
+ //
+ // Example:
+ // When set to "eth1", matching egress traffic will be redirected to the
+ // node matching the NodeSelector field and SNATed with the first IPv4
+ // address assigned to the eth1 interface.
+ //
+ // When none of the Interface or EgressIP fields is specified, the
+ // policy will use the first IPv4 assigned to the interface with the
+ // default route.
+ Interface string `json:"interface,omitempty"`
+
+ // EgressIP is the source IP address that the egress traffic is SNATed
+ // with.
+ //
+ // Example:
+ // When set to "192.168.1.100", matching egress traffic will be
+ // redirected to the node matching the NodeSelector field and SNATed
+ // with IP address 192.168.1.100.
+ //
+ // When none of the Interface or EgressIP fields is specified, the
+ // policy will use the first IPv4 assigned to the interface with the
+ // default route.
+ //
+ // +kubebuilder:validation:Pattern=`((^\s*((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))\s*$)|(^\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?\s*$))`
+ EgressIP string `json:"egressIP,omitempty"`
+}
+
+type EgressRule struct {
+ // Selects Namespaces using cluster-scoped labels. This field follows standard label
+ // selector semantics; if present but empty, it selects all namespaces.
+ NamespaceSelector *slimv1.LabelSelector `json:"namespaceSelector,omitempty"`
+
+ // This is a label selector which selects Pods. This field follows standard label
+ // selector semantics; if present but empty, it selects all pods.
+ PodSelector *slimv1.LabelSelector `json:"podSelector,omitempty"`
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cew_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cew_types.go
new file mode 100644
index 000000000..8d6081182
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cew_types.go
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package v2
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:resource:categories={cilium},singular="ciliumexternalworkload",path="ciliumexternalworkloads",scope="Cluster",shortName={cew}
+// +kubebuilder:printcolumn:JSONPath=".status.id",name="Cilium ID",type=integer
+// +kubebuilder:printcolumn:JSONPath=".status.ip",name="IP",type=string
+// +kubebuilder:subresource:status
+
+// CiliumExternalWorkload is a Kubernetes Custom Resource that
+// contains a specification for an external workload that can join the
+// cluster. The name of the CRD is the FQDN of the external workload,
+// and it needs to match the name in the workload registration. The
+// labels on the CRD object are the labels that will be used to
+// allocate a Cilium Identity for the external workload. If
+// 'io.kubernetes.pod.namespace' or 'io.kubernetes.pod.name' labels
+// are not explicitly specified, they will be defaulted to 'default'
+// and <workload name>, respectively. 'io.cilium.k8s.policy.cluster'
+// will always be defined as the name of the current cluster, which
+// defaults to "default".
+type CiliumExternalWorkload struct {
+ // +k8s:openapi-gen=false
+ // +deepequal-gen=false
+ metav1.TypeMeta `json:",inline"`
+ // +k8s:openapi-gen=false
+ // +deepequal-gen=false
+ metav1.ObjectMeta `json:"metadata"`
+
+ // Spec is the desired configuration of the external Cilium workload.
+ Spec CiliumExternalWorkloadSpec `json:"spec,omitempty"`
+
+ // Status is the most recent status of the external Cilium workload.
+ // It is a read-only field.
+ //
+ // +deepequal-gen=false
+ // +kubebuilder:validation:Optional
+ Status CiliumExternalWorkloadStatus `json:"status"`
+}
+
+// CiliumExternalWorkloadSpec specifies the configuration of an external
+// Cilium workload, such as the IP ranges used for address allocation.
+type CiliumExternalWorkloadSpec struct {
+ // IPv4AllocCIDR is the range of IPv4 addresses in the CIDR format that the external workload can
+ // use to allocate IP addresses for the tunnel device and the health endpoint.
+ //
+ // +kubebuilder:validation:Pattern=`^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\/([0-9]|[1-2][0-9]|3[0-2])$`
+ IPv4AllocCIDR string `json:"ipv4-alloc-cidr,omitempty"`
+
+ // IPv6AllocCIDR is the range of IPv6 addresses in the CIDR format that the external workload can
+ // use to allocate IP addresses for the tunnel device and the health endpoint.
+ //
+ // +kubebuilder:validation:Pattern=`^s*((([0-9A-Fa-f]{1,4}:){7}(:|([0-9A-Fa-f]{1,4})))|(([0-9A-Fa-f]{1,4}:){6}:([0-9A-Fa-f]{1,4})?)|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){0,1}):([0-9A-Fa-f]{1,4})?))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){0,2}):([0-9A-Fa-f]{1,4})?))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){0,3}):([0-9A-Fa-f]{1,4})?))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){0,4}):([0-9A-Fa-f]{1,4})?))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){0,5}):([0-9A-Fa-f]{1,4})?))|(:(:|((:[0-9A-Fa-f]{1,4}){1,7}))))(%.+)?s*/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$`
+ IPv6AllocCIDR string `json:"ipv6-alloc-cidr,omitempty"`
+}
+
+// CiliumExternalWorkloadStatus is the status of the external Cilium workload.
+type CiliumExternalWorkloadStatus struct {
+ // ID is the numeric identity allocated for the external workload.
+ ID uint64 `json:"id,omitempty"`
+
+ // IP is the IP address of the workload. Empty if the workload has not registered.
+ IP string `json:"ip,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:openapi-gen=false
+// +deepequal-gen=false
+
+// CiliumExternalWorkloadList is a list of CiliumExternalWorkload objects.
+type CiliumExternalWorkloadList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ // Items is a list of CiliumExternalWorkload
+ Items []CiliumExternalWorkload `json:"items"`
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/clrp_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/clrp_types.go
new file mode 100644
index 000000000..ca6652843
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/clrp_types.go
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package v2
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "github.com/cilium/cilium/pkg/iana"
+ slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
+ lb "github.com/cilium/cilium/pkg/loadbalancer"
+ "github.com/cilium/cilium/pkg/policy/api"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:resource:categories={cilium,ciliumpolicy},singular="ciliumlocalredirectpolicy",path="ciliumlocalredirectpolicies",scope="Namespaced",shortName={clrp}
+// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name="Age",type=date
+
+// CiliumLocalRedirectPolicy is a Kubernetes Custom Resource that contains a
+// specification to redirect traffic locally within a node.
+type CiliumLocalRedirectPolicy struct {
+ // +k8s:openapi-gen=false
+ // +deepequal-gen=false
+ metav1.TypeMeta `json:",inline"`
+ // +k8s:openapi-gen=false
+ // +deepequal-gen=false
+ metav1.ObjectMeta `json:"metadata"`
+
+ // Spec is the desired behavior of the local redirect policy.
+ Spec CiliumLocalRedirectPolicySpec `json:"spec,omitempty"`
+
+ // Status is the most recent status of the local redirect policy.
+ // It is a read-only field.
+ //
+ // +deepequal-gen=false
+ // +kubebuilder:validation:Optional
+ Status CiliumLocalRedirectPolicyStatus `json:"status"`
+}
+
+type Frontend struct {
+ // IP is a destination IP address for traffic to be redirected.
+ //
+ // Example:
+ // When it is set to "169.254.169.254", traffic destined to
+ // "169.254.169.254" is redirected.
+ //
+ // +kubebuilder:validation:Pattern=`((^\s*((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))\s*$)|(^\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?\s*$))`
+ // +kubebuilder:validation:Required
+ IP string `json:"ip"`
+
+ // ToPorts is a list of destination L4 ports with protocol for traffic
+ // to be redirected.
+ // When multiple ports are specified, the ports must be named.
+ //
+ // Example:
+ // When set to Port: "53" and Protocol: UDP, traffic destined to port '53'
+ // with UDP protocol is redirected.
+ //
+ // +kubebuilder:validation:Required
+ ToPorts []PortInfo `json:"toPorts"`
+}
+
+// RedirectFrontend is a frontend configuration that matches traffic that needs to be redirected.
+// The configuration must be specified using an IP/port tuple or a Kubernetes service.
+type RedirectFrontend struct {
+ // AddressMatcher is a tuple {IP, port, protocol} that matches traffic to be
+ // redirected.
+ //
+ // +kubebuilder:validation:OneOf
+ AddressMatcher *Frontend `json:"addressMatcher,omitempty"`
+
+ // ServiceMatcher specifies Kubernetes service and port that matches
+ // traffic to be redirected.
+ //
+ // +kubebuilder:validation:OneOf
+ ServiceMatcher *ServiceInfo `json:"serviceMatcher,omitempty"`
+}
+
+// PortInfo specifies L4 port number and name along with the transport protocol
+type PortInfo struct {
+ // Port is an L4 port number. The string will be strictly parsed as a single uint16.
+ //
+ // +kubebuilder:validation:Pattern=`^()([1-9]|[1-5]?[0-9]{2,4}|6[1-4][0-9]{3}|65[1-4][0-9]{2}|655[1-2][0-9]|6553[1-5])$`
+ // +kubebuilder:validation:Required
+ Port string `json:"port"`
+
+ // Protocol is the L4 protocol.
+ // Accepted values: "TCP", "UDP"
+ //
+ // +kubebuilder:validation:Enum=TCP;UDP
+ // +kubebuilder:validation:Required
+ Protocol api.L4Proto `json:"protocol"`
+
+ // Name is a port name, which must contain at least one [a-z],
+ // and may also contain [0-9] and '-' anywhere except adjacent to another
+ // '-' or in the beginning or the end.
+ //
+ // +kubebuilder:validation:Pattern=`^([0-9]{1,4})|([a-zA-Z0-9]-?)*[a-zA-Z](-?[a-zA-Z0-9])*$`
+ // +kubebuilder:validation:Optional
+ Name string `json:"name"`
+}
+
+type ServiceInfo struct {
+ // Name is the name of a destination Kubernetes service that identifies traffic
+ // to be redirected.
+ // The service type needs to be ClusterIP.
+ //
+ // Example:
+ // When this field is populated with 'serviceName:myService', all the traffic
+ // destined to the cluster IP of this service at the (specified)
+ // service port(s) will be redirected.
+ //
+ // +kubebuilder:validation:Required
+ Name string `json:"serviceName"`
+
+ // Namespace is the Kubernetes service namespace.
+ // The service namespace must match the namespace of the parent Local
+ // Redirect Policy. For Cluster-wide Local Redirect Policy, this
+ // can be any namespace.
+ // +kubebuilder:validation:Required
+ Namespace string `json:"namespace"`
+
+ // ToPorts is a list of destination service L4 ports with protocol for
+ // traffic to be redirected. If not specified, traffic for all the service
+ // ports will be redirected.
+ // When multiple ports are specified, the ports must be named.
+ //
+ // +kubebuilder:validation:Optional
+ ToPorts []PortInfo `json:"toPorts,omitempty"`
+}
+
+// RedirectBackend is a backend configuration that determines where traffic needs to be redirected to.
+type RedirectBackend struct {
+ // LocalEndpointSelector selects node local pod(s) where traffic is redirected to.
+ //
+ // +kubebuilder:validation:Required
+ LocalEndpointSelector slim_metav1.LabelSelector `json:"localEndpointSelector"`
+
+ // ToPorts is a list of L4 ports with protocol of node local pod(s) where traffic
+ // is redirected to.
+ // When multiple ports are specified, the ports must be named.
+ //
+ // +kubebuilder:validation:Required
+ ToPorts []PortInfo `json:"toPorts"`
+}
+
+// CiliumLocalRedirectPolicySpec specifies the configurations for redirecting traffic
+// within a node.
+type CiliumLocalRedirectPolicySpec struct {
+ // RedirectFrontend specifies frontend configuration to redirect traffic from.
+ // It can not be empty.
+ //
+ // +kubebuilder:validation:Required
+ RedirectFrontend RedirectFrontend `json:"redirectFrontend"`
+
+ // RedirectBackend specifies backend configuration to redirect traffic to.
+ // It can not be empty.
+ //
+ // +kubebuilder:validation:Required
+ RedirectBackend RedirectBackend `json:"redirectBackend"`
+
+ // Description can be used by the creator of the policy to describe the
+ // purpose of this policy.
+ //
+ // +kubebuilder:validation:Optional
+ Description string `json:"description,omitempty"`
+}
+
+// CiliumLocalRedirectPolicyStatus is the status of a Local Redirect Policy.
+type CiliumLocalRedirectPolicyStatus struct {
+ // TODO Define status(aditi)
+ OK bool `json:"ok,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:openapi-gen=false
+// +deepequal-gen=false
+
+// CiliumLocalRedirectPolicyList is a list of CiliumLocalRedirectPolicy objects.
+type CiliumLocalRedirectPolicyList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ // Items is a list of CiliumLocalRedirectPolicy
+ Items []CiliumLocalRedirectPolicy `json:"items"`
+}
+
+// SanitizePortInfo sanitizes all the fields in the PortInfo.
+// It returns port number, name, and protocol derived from the given input and error (failure cases).
+func (pInfo *PortInfo) SanitizePortInfo(checkNamedPort bool) (uint16, string, lb.L4Type, error) {
+ var (
+ pInt uint16
+ pName string
+ protocol lb.L4Type
+ )
+ // Sanitize port
+ if pInfo.Port == "" {
+ return pInt, pName, protocol, fmt.Errorf("port must be specified")
+ } else {
+ p, err := strconv.ParseUint(pInfo.Port, 0, 16)
+ if err != nil {
+ return pInt, pName, protocol, fmt.Errorf("unable to parse port: %v", err)
+ }
+ if p == 0 {
+ return pInt, pName, protocol, fmt.Errorf("port cannot be 0")
+ }
+ pInt = uint16(p)
+ }
+ // Sanitize name
+ if checkNamedPort {
+ if pInfo.Name == "" {
+ return pInt, pName, protocol, fmt.Errorf("port %s in the local "+
+ "redirect policy spec must have a valid IANA_SVC_NAME, as there are multiple ports", pInfo.Port)
+
+ }
+ if !iana.IsSvcName(pInfo.Name) {
+ return pInt, pName, protocol, fmt.Errorf("port name %s isn't a "+
+ "valid IANA_SVC_NAME", pInfo.Name)
+ }
+ }
+ pName = strings.ToLower(pInfo.Name) // Normalize for case insensitive comparison
+
+ // Sanitize protocol
+ var err error
+ protocol, err = lb.NewL4Type(string(pInfo.Protocol))
+ if err != nil {
+ return pInt, pName, protocol, err
+ }
+ return pInt, pName, protocol, nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cnp_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cnp_types.go
new file mode 100644
index 000000000..1fb1fdf50
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cnp_types.go
@@ -0,0 +1,297 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package v2
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "github.com/cilium/cilium/pkg/comparator"
+ k8sConst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
+ k8sCiliumUtils "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/utils"
+ slimv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
+ k8sUtils "github.com/cilium/cilium/pkg/k8s/utils"
+ "github.com/cilium/cilium/pkg/labels"
+ "github.com/cilium/cilium/pkg/policy/api"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +deepequal-gen:private-method=true
+// +kubebuilder:resource:categories={cilium,ciliumpolicy},singular="ciliumnetworkpolicy",path="ciliumnetworkpolicies",scope="Namespaced",shortName={cnp,ciliumnp}
+// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name="Age",type=date
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+
+// CiliumNetworkPolicy is a Kubernetes third-party resource with an extended
+// version of NetworkPolicy.
+type CiliumNetworkPolicy struct {
+ // +deepequal-gen=false
+ metav1.TypeMeta `json:",inline"`
+ // +deepequal-gen=false
+ metav1.ObjectMeta `json:"metadata"`
+
+ // Spec is the desired Cilium specific rule specification.
+ Spec *api.Rule `json:"spec,omitempty"`
+
+ // Specs is a list of desired Cilium specific rule specification.
+ Specs api.Rules `json:"specs,omitempty"`
+
+ // Status is the status of the Cilium policy rule
+ //
+ // +deepequal-gen=false
+ // +kubebuilder:validation:Optional
+ Status CiliumNetworkPolicyStatus `json:"status"`
+}
+
+// DeepEqual compares 2 CNPs.
+func (in *CiliumNetworkPolicy) DeepEqual(other *CiliumNetworkPolicy) bool {
+ return objectMetaDeepEqual(in.ObjectMeta, other.ObjectMeta) && in.deepEqual(other)
+}
+
+// objectMetaDeepEqual performs an equality check for metav1.ObjectMeta that
+// ignores the LastAppliedConfigAnnotation. This function's usage is shared
+// among CNP and CCNP as they have the same structure.
+func objectMetaDeepEqual(in, other metav1.ObjectMeta) bool {
+ if !(in.Name == other.Name && in.Namespace == other.Namespace) {
+ return false
+ }
+
+ return comparator.MapStringEqualsIgnoreKeys(
+ in.GetAnnotations(),
+ other.GetAnnotations(),
+ // Ignore v1.LastAppliedConfigAnnotation annotation
+ []string{v1.LastAppliedConfigAnnotation})
+}
+
+// +deepequal-gen=true
+
+// CiliumNetworkPolicyStatus is the status of a Cilium policy rule.
+type CiliumNetworkPolicyStatus struct {
+ // Nodes is the Cilium policy status for each node
+ Nodes map[string]CiliumNetworkPolicyNodeStatus `json:"nodes,omitempty"`
+
+ // DerivativePolicies is the status of all policies derived from the Cilium
+ // policy
+ DerivativePolicies map[string]CiliumNetworkPolicyNodeStatus `json:"derivativePolicies,omitempty"`
+}
+
+// +deepequal-gen=true
+
+// CiliumNetworkPolicyNodeStatus is the status of a Cilium policy rule for a
+// specific node.
+type CiliumNetworkPolicyNodeStatus struct {
+ // OK is true when the policy has been parsed and imported successfully
+ // into the in-memory policy repository on the node.
+ OK bool `json:"ok,omitempty"`
+
+ // Error describes any error that occurred when parsing or importing the
+ // policy, or realizing the policy for the endpoints to which it applies
+ // on the node.
+ Error string `json:"error,omitempty"`
+
+ // LastUpdated contains the last time this status was updated
+ LastUpdated slimv1.Time `json:"lastUpdated,omitempty"`
+
+ // Revision is the policy revision of the repository which first implemented
+ // this policy.
+ Revision uint64 `json:"localPolicyRevision,omitempty"`
+
+ // Enforcing is set to true once all endpoints present at the time the
+ // policy has been imported are enforcing this policy.
+ Enforcing bool `json:"enforcing,omitempty"`
+
+ // Annotations corresponds to the Annotations in the ObjectMeta of the CNP
+ // that have been realized on the node for CNP. That is, if a CNP has been
+ // imported and has been assigned annotation X=Y by the user,
+ // Annotations in CiliumNetworkPolicyNodeStatus will be X=Y once the
+ // CNP that was imported corresponding to Annotation X=Y has been realized on
+ // the node.
+ Annotations map[string]string `json:"annotations,omitempty"`
+}
+
+// CreateCNPNodeStatus returns a CiliumNetworkPolicyNodeStatus created from the
+// provided fields.
+func CreateCNPNodeStatus(enforcing, ok bool, cnpError error, rev uint64, annotations map[string]string) CiliumNetworkPolicyNodeStatus {
+ cnpns := CiliumNetworkPolicyNodeStatus{
+ Enforcing: enforcing,
+ Revision: rev,
+ OK: ok,
+ LastUpdated: slimv1.Now(),
+ Annotations: annotations,
+ }
+ if cnpError != nil {
+ cnpns.Error = cnpError.Error()
+ }
+ return cnpns
+}
+
+func (r *CiliumNetworkPolicy) String() string {
+ result := ""
+ result += fmt.Sprintf("TypeMeta: %s, ", r.TypeMeta.String())
+ result += fmt.Sprintf("ObjectMeta: %s, ", r.ObjectMeta.String())
+ if r.Spec != nil {
+ result += fmt.Sprintf("Spec: %v", *(r.Spec))
+ }
+ if r.Specs != nil {
+ result += fmt.Sprintf("Specs: %v", r.Specs)
+ }
+ result += fmt.Sprintf("Status: %v", r.Status)
+ return result
+}
+
+// GetPolicyStatus returns the CiliumNetworkPolicyNodeStatus corresponding to
+// nodeName in the provided CiliumNetworkPolicy. If Nodes within the rule's
+// Status is nil, returns an empty CiliumNetworkPolicyNodeStatus.
+func (r *CiliumNetworkPolicy) GetPolicyStatus(nodeName string) CiliumNetworkPolicyNodeStatus {
+ if r.Status.Nodes == nil {
+ return CiliumNetworkPolicyNodeStatus{}
+ }
+ return r.Status.Nodes[nodeName]
+}
+
+// SetPolicyStatus sets the given policy status for the given nodes' map.
+func (r *CiliumNetworkPolicy) SetPolicyStatus(nodeName string, cnpns CiliumNetworkPolicyNodeStatus) {
+ if r.Status.Nodes == nil {
+ r.Status.Nodes = map[string]CiliumNetworkPolicyNodeStatus{}
+ }
+ r.Status.Nodes[nodeName] = cnpns
+}
+
+// SetDerivedPolicyStatus set the derivative policy status for the given
+// derivative policy name.
+func (r *CiliumNetworkPolicy) SetDerivedPolicyStatus(derivativePolicyName string, status CiliumNetworkPolicyNodeStatus) {
+ if r.Status.DerivativePolicies == nil {
+ r.Status.DerivativePolicies = map[string]CiliumNetworkPolicyNodeStatus{}
+ }
+ r.Status.DerivativePolicies[derivativePolicyName] = status
+}
+
+// AnnotationsEquals returns true if ObjectMeta.Annotations of each
+// CiliumNetworkPolicy are equivalent (i.e., they contain equivalent key-value
+// pairs).
+func (r *CiliumNetworkPolicy) AnnotationsEquals(o *CiliumNetworkPolicy) bool {
+ if o == nil {
+ return r == nil
+ }
+ return reflect.DeepEqual(r.ObjectMeta.Annotations, o.ObjectMeta.Annotations)
+}
+
+// Parse parses a CiliumNetworkPolicy and returns a list of cilium policy
+// rules.
+func (r *CiliumNetworkPolicy) Parse() (api.Rules, error) {
+ if r.ObjectMeta.Name == "" {
+ return nil, NewErrParse("CiliumNetworkPolicy must have name")
+ }
+
+ namespace := k8sUtils.ExtractNamespace(&r.ObjectMeta)
+ // Temporary fix for CCNPs. See #12834.
+ // TL;DR. CCNPs are converted into SlimCNPs and end up here so we need to
+ // convert them back to CCNPs to allow proper parsing.
+ if namespace == "" {
+ ccnp := CiliumClusterwideNetworkPolicy{
+ TypeMeta: r.TypeMeta,
+ ObjectMeta: r.ObjectMeta,
+ Spec: r.Spec,
+ Specs: r.Specs,
+ Status: r.Status,
+ }
+ return ccnp.Parse()
+ }
+ name := r.ObjectMeta.Name
+ uid := r.ObjectMeta.UID
+
+ retRules := api.Rules{}
+
+ if r.Spec == nil && r.Specs == nil {
+ return nil, ErrEmptyCNP
+ }
+
+ if r.Spec != nil {
+ if err := r.Spec.Sanitize(); err != nil {
+ return nil, NewErrParse(fmt.Sprintf("Invalid CiliumNetworkPolicy spec: %s", err))
+ }
+ if r.Spec.NodeSelector.LabelSelector != nil {
+ return nil, NewErrParse("Invalid CiliumNetworkPolicy spec: rule cannot have NodeSelector")
+ }
+ cr := k8sCiliumUtils.ParseToCiliumRule(namespace, name, uid, r.Spec)
+ retRules = append(retRules, cr)
+ }
+ if r.Specs != nil {
+ for _, rule := range r.Specs {
+ if err := rule.Sanitize(); err != nil {
+ return nil, NewErrParse(fmt.Sprintf("Invalid CiliumNetworkPolicy specs: %s", err))
+
+ }
+ cr := k8sCiliumUtils.ParseToCiliumRule(namespace, name, uid, rule)
+ retRules = append(retRules, cr)
+ }
+ }
+
+ return retRules, nil
+}
+
+// GetControllerName returns the unique name for the controller manager.
+func (r *CiliumNetworkPolicy) GetControllerName() string {
+ name := k8sUtils.GetObjNamespaceName(&r.ObjectMeta)
+ const staticLen = 6
+ var str strings.Builder
+ str.Grow(staticLen + len(name) + len(k8sConst.CtrlPrefixPolicyStatus))
+ str.WriteString(k8sConst.CtrlPrefixPolicyStatus)
+ str.WriteString(" (v2 ")
+ str.WriteString(name)
+ str.WriteString(")")
+ return str.String()
+}
+
+// GetIdentityLabels returns all rule labels in the CiliumNetworkPolicy.
+func (r *CiliumNetworkPolicy) GetIdentityLabels() labels.LabelArray {
+ namespace := k8sUtils.ExtractNamespace(&r.ObjectMeta)
+ name := r.ObjectMeta.Name
+ uid := r.ObjectMeta.UID
+
+ // Even though the struct represents CiliumNetworkPolicy, we use it both for
+ // CiliumNetworkPolicy and CiliumClusterwideNetworkPolicy, so here we check for namespace
+ // to send correct derivedFrom label to get the correct policy labels.
+ derivedFrom := k8sCiliumUtils.ResourceTypeCiliumNetworkPolicy
+ if namespace == "" {
+ derivedFrom = k8sCiliumUtils.ResourceTypeCiliumClusterwideNetworkPolicy
+ }
+ return k8sCiliumUtils.GetPolicyLabels(namespace, name, uid, derivedFrom)
+}
+
+// RequiresDerivative return true if the CNP has any rule that will create a new
+// derivative rule.
+func (r *CiliumNetworkPolicy) RequiresDerivative() bool {
+ if r.Spec != nil {
+ if r.Spec.RequiresDerivative() {
+ return true
+ }
+ }
+ if r.Specs != nil {
+ for _, rule := range r.Specs {
+ if rule.RequiresDerivative() {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:openapi-gen=false
+// +deepequal-gen=false
+
+// CiliumNetworkPolicyList is a list of CiliumNetworkPolicy objects.
+type CiliumNetworkPolicyList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ // Items is a list of CiliumNetworkPolicy
+ Items []CiliumNetworkPolicy `json:"items"`
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/doc.go
new file mode 100644
index 000000000..8b8c1ed19
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/doc.go
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// +k8s:deepcopy-gen=package,register
+// +k8s:openapi-gen=true
+// +deepequal-gen=package
+
+// Package v2 is the v2 version of the API.
+// +groupName=cilium.io
+package v2
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/errors.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/errors.go
new file mode 100644
index 000000000..cc8a0653d
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/errors.go
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package v2
+
+var (
+ // ErrEmptyCNP is an error representing a CNP that is empty, which means it is
+ // missing both a `spec` and `specs` (both are nil).
+ ErrEmptyCNP = NewErrParse("Invalid CiliumNetworkPolicy spec(s): empty policy")
+
+ // ErrEmptyCCNP is an error representing a CCNP that is empty, which means it is
+ // missing both a `spec` and `specs` (both are nil).
+ ErrEmptyCCNP = NewErrParse("Invalid CiliumClusterwideNetworkPolicy spec(s): empty policy")
+
+ // ParsingErr is for comparison when checking error types.
+ ParsingErr = NewErrParse("")
+)
+
+// ErrParse is an error to describe where policy fails to parse due any invalid
+// rule.
+//
+// +k8s:deepcopy-gen=false
+// +deepequal-gen=false
+type ErrParse struct {
+ msg string
+}
+
+// NewErrParse returns a new ErrParse.
+func NewErrParse(msg string) ErrParse {
+ return ErrParse{
+ msg: msg,
+ }
+}
+
+// Error returns the error message for parsing
+func (e ErrParse) Error() string {
+ return e.msg
+}
+
+// Is returns true if the given error is the type of 'ErrParse'.
+func (_ ErrParse) Is(e error) bool {
+ _, ok := e.(ErrParse)
+ return ok
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/logfields.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/logfields.go
new file mode 100644
index 000000000..b4f556373
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/logfields.go
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package v2
+
+import (
+ "github.com/cilium/cilium/pkg/logging"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+)
+
+var log = logging.DefaultLogger.WithField(logfields.LogSubsys, "k8s-apis-cilium.io-v2")
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/register.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/register.go
new file mode 100644
index 000000000..3197b4613
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/register.go
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package v2
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ k8sconst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
+)
+
+const (
+ // CustomResourceDefinitionGroup is the name of the third party resource group
+ CustomResourceDefinitionGroup = k8sconst.CustomResourceDefinitionGroup
+
+ // CustomResourceDefinitionVersion is the current version of the resource
+ CustomResourceDefinitionVersion = "v2"
+
+ // Cilium Network Policy (CNP)
+
+ // CNPPluralName is the plural name of Cilium Network Policy
+ CNPPluralName = "ciliumnetworkpolicies"
+
+ // CNPKindDefinition is the kind name for Cilium Network Policy
+ CNPKindDefinition = "CiliumNetworkPolicy"
+
+ // CNPName is the full name of Cilium Network Policy
+ CNPName = CNPPluralName + "." + CustomResourceDefinitionGroup
+
+ // Cilium Cluster wide Network Policy (CCNP)
+
+ // CCNPPluralName is the plural name of Cilium Cluster wide Network Policy
+ CCNPPluralName = "ciliumclusterwidenetworkpolicies"
+
+ // CCNPKindDefinition is the kind name for Cilium Cluster wide Network Policy
+ CCNPKindDefinition = "CiliumClusterwideNetworkPolicy"
+
+ // CCNPName is the full name of Cilium Cluster wide Network Policy
+ CCNPName = CCNPPluralName + "." + CustomResourceDefinitionGroup
+
+ // Cilium Egress Gateway Policy (CEGP)
+
+ // CEGPPluralName is the plural name of Cilium Egress Gateway Policy
+ CEGPPluralName = "ciliumegressgatewaypolicies"
+
+ // CEGPKindDefinition is the kind name of Cilium Egress Gateway Policy
+ CEGPKindDefinition = "CiliumEgressGatewayPolicy"
+
+ // CEGPName is the full name of Cilium Egress Gateway Policy
+ CEGPName = CEGPPluralName + "." + CustomResourceDefinitionGroup
+
+ // Cilium Endpoint (CEP)
+
+ // CEPPluralName is the plural name of Cilium Endpoint
+ CEPPluralName = "ciliumendpoints"
+
+ // CEPKindDefinition is the kind name for Cilium Endpoint
+ CEPKindDefinition = "CiliumEndpoint"
+
+ // CEPName is the full name of Cilium Endpoint
+ CEPName = CEPPluralName + "." + CustomResourceDefinitionGroup
+
+ // Cilium Node (CN)
+
+ // CNPluralName is the plural name of Cilium Node
+ CNPluralName = "ciliumnodes"
+
+ // CNKindDefinition is the kind name for Cilium Node
+ CNKindDefinition = "CiliumNode"
+
+ // CNName is the full name of Cilium Node
+ CNName = CNPluralName + "." + CustomResourceDefinitionGroup
+
+ // Cilium Identity
+
+ // CIDPluralName is the plural name of Cilium Identity
+ CIDPluralName = "ciliumidentities"
+
+ // CIDKindDefinition is the kind name for Cilium Identity
+ CIDKindDefinition = "CiliumIdentity"
+
+ // CIDName is the full name of Cilium Identity
+ CIDName = CIDPluralName + "." + CustomResourceDefinitionGroup
+
+ // Cilium Local Redirect Policy (CLRP)
+
+ // CLRPPluralName is the plural name of Local Redirect Policy
+ CLRPPluralName = "ciliumlocalredirectpolicies"
+
+ // CLRPKindDefinition is the kind name for Local Redirect Policy
+ CLRPKindDefinition = "CiliumLocalRedirectPolicy"
+
+ // CLRPName is the full name of Local Redirect Policy
+ CLRPName = CLRPPluralName + "." + CustomResourceDefinitionGroup
+
+ // Cilium External Workload (CEW)
+
+ // CEWPluralName is the plural name of Cilium External Workload
+ CEWPluralName = "ciliumexternalworkloads"
+
+ // CEWKindDefinition is the kind name for Cilium External Workload
+ CEWKindDefinition = "CiliumExternalWorkload"
+
+ // CEWName is the full name of Cilium External Workload
+ CEWName = CEWPluralName + "." + CustomResourceDefinitionGroup
+
+ // Cilium Cluster Envoy Config (CCEC)
+
+ // CCECPluralName is the plural name of Cilium Clusterwide Envoy Config
+ CCECPluralName = "ciliumclusterwideenvoyconfigs"
+
+ // CCECKindDefinition is the kind name of Cilium Clusterwide Envoy Config
+ CCECKindDefinition = "CiliumClusterwideEnvoyConfig"
+
+ // CCECName is the full name of Cilium Clusterwide Envoy Config
+ CCECName = CCECPluralName + "." + CustomResourceDefinitionGroup
+
+ // Cilium Envoy Config (CEC)
+
+ // CECPluralName is the plural name of Cilium Envoy Config
+ CECPluralName = "ciliumenvoyconfigs"
+
+ // CECKindDefinition is the kind name of Cilium Envoy Config
+ CECKindDefinition = "CiliumEnvoyConfig"
+
+ // CECName is the full name of Cilium Envoy Config
+ CECName = CECPluralName + "." + CustomResourceDefinitionGroup
+)
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{
+ Group: CustomResourceDefinitionGroup,
+ Version: CustomResourceDefinitionVersion,
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ // SchemeBuilder is needed by DeepCopy generator.
+ SchemeBuilder runtime.SchemeBuilder
+ // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+ localSchemeBuilder = &SchemeBuilder
+
+ // AddToScheme adds all types of this clientset into the given scheme.
+ // This allows composition of clientsets, like in:
+ //
+ // import (
+ // "k8s.io/client-go/kubernetes"
+ // clientsetscheme "k8s.io/client-go/kubernetes/scheme"
+ // aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+ // )
+ //
+ // kclientset, _ := kubernetes.NewForConfig(c)
+ // aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+ AddToScheme = localSchemeBuilder.AddToScheme
+)
+
+func init() {
+ // We only register manually written functions here. The registration of the
+ // generated functions takes place in the generated files. The separation
+ // makes the code compile even when the generated files are missing.
+ localSchemeBuilder.Register(addKnownTypes)
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &CiliumNetworkPolicy{},
+ &CiliumNetworkPolicyList{},
+ &CiliumClusterwideNetworkPolicy{},
+ &CiliumClusterwideNetworkPolicyList{},
+ &CiliumEgressGatewayPolicy{},
+ &CiliumEgressGatewayPolicyList{},
+ &CiliumEndpoint{},
+ &CiliumEndpointList{},
+ &CiliumNode{},
+ &CiliumNodeList{},
+ &CiliumExternalWorkload{},
+ &CiliumExternalWorkloadList{},
+ &CiliumIdentity{},
+ &CiliumIdentityList{},
+ &CiliumLocalRedirectPolicy{},
+ &CiliumLocalRedirectPolicyList{},
+ &CiliumEnvoyConfig{},
+ &CiliumEnvoyConfigList{},
+ &CiliumClusterwideEnvoyConfig{},
+ &CiliumClusterwideEnvoyConfigList{},
+ )
+
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/types.go
new file mode 100644
index 000000000..af821e8df
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/types.go
@@ -0,0 +1,467 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package v2
+
+import (
+ "sort"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "github.com/cilium/cilium/api/v1/models"
+ alibabaCloudTypes "github.com/cilium/cilium/pkg/alibabacloud/eni/types"
+ eniTypes "github.com/cilium/cilium/pkg/aws/eni/types"
+ azureTypes "github.com/cilium/cilium/pkg/azure/types"
+ ipamTypes "github.com/cilium/cilium/pkg/ipam/types"
+ "github.com/cilium/cilium/pkg/node/addressing"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:openapi-gen=false
+// +kubebuilder:resource:categories={cilium},singular="ciliumendpoint",path="ciliumendpoints",scope="Namespaced",shortName={cep,ciliumep}
+// +kubebuilder:printcolumn:JSONPath=".status.id",description="Cilium endpoint id",name="Endpoint ID",type=integer
+// +kubebuilder:printcolumn:JSONPath=".status.identity.id",description="Cilium identity id",name="Identity ID",type=integer
+// +kubebuilder:printcolumn:JSONPath=".status.policy.ingress.state",description="Ingress enforcement in the endpoint",name="Ingress Enforcement",type=string
+// +kubebuilder:printcolumn:JSONPath=".status.policy.egress.state",description="Egress enforcement in the endpoint",name="Egress Enforcement",type=string
+// +kubebuilder:printcolumn:JSONPath=".status.visibility-policy-status",description="Status of visibility policy in the endpoint",name="Visibility Policy",type=string
+// +kubebuilder:printcolumn:JSONPath=".status.state",description="Endpoint current state",name="Endpoint State",type=string
+// +kubebuilder:printcolumn:JSONPath=".status.networking.addressing[0].ipv4",description="Endpoint IPv4 address",name="IPv4",type=string
+// +kubebuilder:printcolumn:JSONPath=".status.networking.addressing[0].ipv6",description="Endpoint IPv6 address",name="IPv6",type=string
+// +kubebuilder:storageversion
+
+// CiliumEndpoint is the status of a Cilium policy rule.
+type CiliumEndpoint struct {
+ // +deepequal-gen=false
+ metav1.TypeMeta `json:",inline"`
+ // +deepequal-gen=false
+ metav1.ObjectMeta `json:"metadata"`
+
+ // +kubebuilder:validation:Optional
+ Status EndpointStatus `json:"status"`
+}
+
+// EndpointPolicyState defines the state of the Policy mode: "enforcing", "non-enforcing", "disabled"
+type EndpointPolicyState string
+
+// EndpointStatus is the status of a Cilium endpoint.
+type EndpointStatus struct {
+ // ID is the cilium-agent-local ID of the endpoint.
+ ID int64 `json:"id,omitempty"`
+
+ // Controllers is the list of failing controllers for this endpoint.
+ Controllers ControllerList `json:"controllers,omitempty"`
+
+ // ExternalIdentifiers is a set of identifiers to identify the endpoint
+ // apart from the pod name. This includes container runtime IDs.
+ ExternalIdentifiers *models.EndpointIdentifiers `json:"external-identifiers,omitempty"`
+
+ // Health is the overall endpoint & subcomponent health.
+ Health *models.EndpointHealth `json:"health,omitempty"`
+
+ // Identity is the security identity associated with the endpoint
+ Identity *EndpointIdentity `json:"identity,omitempty"`
+
+ // Log is the list of the last few warning and error log entries
+ Log []*models.EndpointStatusChange `json:"log,omitempty"`
+
+ // Networking is the networking properties of the endpoint.
+ //
+ // +kubebuilder:validation:Optional
+ Networking *EndpointNetworking `json:"networking,omitempty"`
+
+ // Encryption is the encryption configuration of the node
+ //
+ // +kubebuilder:validation:Optional
+ Encryption EncryptionSpec `json:"encryption,omitempty"`
+
+ Policy *EndpointPolicy `json:"policy,omitempty"`
+
+ VisibilityPolicyStatus *string `json:"visibility-policy-status,omitempty"`
+
+ // State is the state of the endpoint.
+ //
+ // +kubebuilder:validation:Enum=creating;waiting-for-identity;not-ready;waiting-to-regenerate;regenerating;restoring;ready;disconnecting;disconnected;invalid
+ State string `json:"state,omitempty"`
+
+ NamedPorts models.NamedPorts `json:"named-ports,omitempty"`
+}
+
+// EndpointStatusLogEntries is the maximum number of log entries in
+// EndpointStatus.Log.
+const EndpointStatusLogEntries = 5
+
+// +k8s:deepcopy-gen=false
+
+// ControllerList is a list of ControllerStatus.
+type ControllerList []ControllerStatus
+
+// Sort sorts the ControllerList by controller name
+func (c ControllerList) Sort() {
+ sort.Slice(c, func(i, j int) bool { return c[i].Name < c[j].Name })
+}
+
+// ControllerStatus is the status of a failing controller.
+type ControllerStatus struct {
+ // Name is the name of the controller
+ Name string `json:"name,omitempty"`
+
+ // Configuration is the controller configuration
+ Configuration *models.ControllerStatusConfiguration `json:"configuration,omitempty"`
+
+ // Status is the status of the controller
+ Status ControllerStatusStatus `json:"status,omitempty"`
+
+ // UUID is the UUID of the controller
+ UUID string `json:"uuid,omitempty"`
+}
+
+// +k8s:deepcopy-gen=false
+
+// ControllerStatusStatus is the detailed status section of a controller.
+type ControllerStatusStatus struct {
+ ConsecutiveFailureCount int64 `json:"consecutive-failure-count,omitempty"`
+ FailureCount int64 `json:"failure-count,omitempty"`
+ LastFailureMsg string `json:"last-failure-msg,omitempty"`
+ LastFailureTimestamp string `json:"last-failure-timestamp,omitempty"`
+ LastSuccessTimestamp string `json:"last-success-timestamp,omitempty"`
+ SuccessCount int64 `json:"success-count,omitempty"`
+}
+
+// EndpointPolicy represents the endpoint's policy by listing all allowed
+// ingress and egress identities in combination with L4 port and protocol.
+type EndpointPolicy struct {
+ Ingress *EndpointPolicyDirection `json:"ingress,omitempty"`
+ Egress *EndpointPolicyDirection `json:"egress,omitempty"`
+}
+
+// EndpointPolicyDirection is the list of allowed identities per direction.
+type EndpointPolicyDirection struct {
+ Enforcing bool `json:"enforcing"`
+ Allowed AllowedIdentityList `json:"allowed,omitempty"`
+ Denied DenyIdentityList `json:"denied,omitempty"`
+ // Deprecated
+ Removing AllowedIdentityList `json:"removing,omitempty"`
+ // Deprecated
+ Adding AllowedIdentityList `json:"adding,omitempty"`
+ State EndpointPolicyState `json:"state,omitempty"`
+}
+
+// IdentityTuple specifies a peer by identity, destination port and protocol.
+type IdentityTuple struct {
+ Identity uint64 `json:"identity,omitempty"`
+ IdentityLabels map[string]string `json:"identity-labels,omitempty"`
+ DestPort uint16 `json:"dest-port,omitempty"`
+ Protocol uint8 `json:"protocol,omitempty"`
+}
+
+// +k8s:deepcopy-gen=false
+
+// IdentityList is a list of IdentityTuple.
+type IdentityList []IdentityTuple
+
+// Sort sorts an IdentityList by numeric identity, port and protocol.
+func (a IdentityList) Sort() {
+ sort.Slice(a, func(i, j int) bool {
+ if a[i].Identity < a[j].Identity {
+ return true
+ } else if a[i].Identity == a[j].Identity {
+ if a[i].DestPort < a[j].DestPort {
+ return true
+ } else if a[i].DestPort == a[j].DestPort {
+ return a[i].Protocol < a[j].Protocol
+ }
+ }
+ return false
+ })
+}
+
+// +k8s:deepcopy-gen=false
+
+// AllowedIdentityList is a list of IdentityTuples that specifies peers that are
+// allowed.
+type AllowedIdentityList IdentityList
+
+// Sort sorts an AllowedIdentityList by numeric identity, port and protocol.
+func (a AllowedIdentityList) Sort() {
+ IdentityList(a).Sort()
+}
+
+// +k8s:deepcopy-gen=false
+
+// DenyIdentityList is a list of IdentityTuples that specifies peers that are
+// denied.
+type DenyIdentityList IdentityList
+
+// Sort sorts a DenyIdentityList by numeric identity, port and protocol.
+func (d DenyIdentityList) Sort() {
+ IdentityList(d).Sort()
+}
+
+// EndpointIdentity is the identity information of an endpoint.
+type EndpointIdentity struct {
+ // ID is the numeric identity of the endpoint
+ ID int64 `json:"id,omitempty"`
+
+ // Labels is the list of labels associated with the identity
+ Labels []string `json:"labels,omitempty"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:resource:categories={cilium},singular="ciliumidentity",path="ciliumidentities",scope="Cluster",shortName={ciliumid}
+// +kubebuilder:printcolumn:JSONPath=".metadata.labels.io\\.kubernetes\\.pod\\.namespace",description="The namespace of the entity",name="Namespace",type=string
+// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",description="The age of the identity",name="Age",type=date
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+
+// CiliumIdentity is a CRD that represents an identity managed by Cilium.
+// It is intended as a backing store for identity allocation, acting as the
+// global coordination backend, and can be used in place of a KVStore (such as
+// etcd).
+// The name of the CRD is the numeric identity and the labels on the CRD object
+// are the kubernetes sourced labels seen by cilium. This is currently the
+// only label source possible when running under kubernetes. Non-kubernetes
+// labels are filtered but all labels, from all sources, are placed in the
+// SecurityLabels field. These also include the source and are used to define
+// the identity.
+// The labels under metav1.ObjectMeta can be used when searching for
+// CiliumIdentity instances that include particular labels. This can be done
+// with invocations such as:
+//
+// kubectl get ciliumid -l 'foo=bar'
+type CiliumIdentity struct {
+ // +deepequal-gen=false
+ metav1.TypeMeta `json:",inline"`
+ // +deepequal-gen=false
+ metav1.ObjectMeta `json:"metadata"`
+
+ // SecurityLabels is the source-of-truth set of labels for this identity.
+ SecurityLabels map[string]string `json:"security-labels"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +deepequal-gen=false
+
+// CiliumIdentityList is a list of CiliumIdentity objects.
+type CiliumIdentityList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ // Items is a list of CiliumIdentity
+ Items []CiliumIdentity `json:"items"`
+}
+
+// +k8s:deepcopy-gen=false
+
+// AddressPair is a pair of IPv4 and/or IPv6 address.
+type AddressPair struct {
+ IPV4 string `json:"ipv4,omitempty"`
+ IPV6 string `json:"ipv6,omitempty"`
+}
+
+// +k8s:deepcopy-gen=false
+
+// AddressPairList is a list of address pairs.
+type AddressPairList []*AddressPair
+
+// Sort sorts an AddressPairList by IPv4 and IPv6 address.
+func (a AddressPairList) Sort() {
+ sort.Slice(a, func(i, j int) bool {
+ if a[i].IPV4 < a[j].IPV4 {
+ return true
+ } else if a[i].IPV4 == a[j].IPV4 {
+ return a[i].IPV6 < a[j].IPV6
+ }
+ return false
+ })
+}
+
+// EndpointNetworking is the addressing information of an endpoint.
+type EndpointNetworking struct {
+ // IP4/6 addresses assigned to this Endpoint
+ Addressing AddressPairList `json:"addressing"`
+
+ // NodeIP is the IP of the node the endpoint is running on. The IP must
+ // be reachable between nodes.
+ NodeIP string `json:"node,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:openapi-gen=false
+// +deepequal-gen=false
+
+// CiliumEndpointList is a list of CiliumEndpoint objects.
+type CiliumEndpointList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ // Items is a list of CiliumEndpoint
+ Items []CiliumEndpoint `json:"items"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:resource:categories={cilium},singular="ciliumnode",path="ciliumnodes",scope="Cluster",shortName={cn,ciliumn}
+// +kubebuilder:printcolumn:JSONPath=".spec.addresses[?(@.type==\"CiliumInternalIP\")].ip",description="Cilium internal IP for this node",name="CiliumInternalIP",type=string
+// +kubebuilder:printcolumn:JSONPath=".spec.addresses[?(@.type==\"InternalIP\")].ip",description="IP of the node",name="InternalIP",type=string
+// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",description="Time duration since creation of Ciliumnode",name="Age",type=date
+// +kubebuilder:storageversion
+// +kubebuilder:subresource:status
+
+// CiliumNode represents a node managed by Cilium. It contains a specification
+// to control various node specific configuration aspects and a status section
+// to represent the status of the node.
+type CiliumNode struct {
+ // +deepequal-gen=false
+ metav1.TypeMeta `json:",inline"`
+ // +deepequal-gen=false
+ metav1.ObjectMeta `json:"metadata"`
+
+ // Spec defines the desired specification/configuration of the node.
+ Spec NodeSpec `json:"spec"`
+
+ // Status defines the realized specification/configuration and status
+ // of the node.
+ //
+ // +kubebuilder:validation:Optional
+ Status NodeStatus `json:"status,omitempty"`
+}
+
+// NodeAddress is a node address.
+type NodeAddress struct {
+ // Type is the type of the node address
+ Type addressing.AddressType `json:"type,omitempty"`
+
+ // IP is an IP of a node
+ IP string `json:"ip,omitempty"`
+}
+
+// NodeSpec is the configuration specific to a node.
+type NodeSpec struct {
+ // InstanceID is the identifier of the node. This is different from the
+ // node name which is typically the FQDN of the node. The InstanceID
+ // typically refers to the identifier used by the cloud provider or
+ // some other means of identification.
+ InstanceID string `json:"instance-id,omitempty"`
+
+ // Addresses is the list of all node addresses.
+ //
+ // +kubebuilder:validation:Optional
+ Addresses []NodeAddress `json:"addresses,omitempty"`
+
+ // HealthAddressing is the addressing information for health connectivity
+ // checking.
+ //
+ // +kubebuilder:validation:Optional
+ HealthAddressing HealthAddressingSpec `json:"health,omitempty"`
+
+ // IngressAddressing is the addressing information for Ingress listener.
+ //
+ // +kubebuilder:validation:Optional
+ IngressAddressing AddressPair `json:"ingress,omitempty"`
+
+ // Encryption is the encryption configuration of the node.
+ //
+ // +kubebuilder:validation:Optional
+ Encryption EncryptionSpec `json:"encryption,omitempty"`
+
+ // ENI is the AWS ENI specific configuration.
+ //
+ // +kubebuilder:validation:Optional
+ ENI eniTypes.ENISpec `json:"eni,omitempty"`
+
+ // Azure is the Azure IPAM specific configuration.
+ //
+ // +kubebuilder:validation:Optional
+ Azure azureTypes.AzureSpec `json:"azure,omitempty"`
+
+ // AlibabaCloud is the AlibabaCloud IPAM specific configuration.
+ //
+ // +kubebuilder:validation:Optional
+ AlibabaCloud alibabaCloudTypes.Spec `json:"alibaba-cloud,omitempty"`
+
+ // IPAM is the address management specification. This section can be
+ // populated by a user or it can be automatically populated by an IPAM
+ // operator.
+ //
+ // +kubebuilder:validation:Optional
+ IPAM ipamTypes.IPAMSpec `json:"ipam,omitempty"`
+
+ // NodeIdentity is the Cilium numeric identity allocated for the node, if any.
+ //
+ // +kubebuilder:validation:Optional
+ NodeIdentity uint64 `json:"nodeidentity,omitempty"`
+}
+
+// HealthAddressingSpec is the addressing information required to do
+// connectivity health checking.
+type HealthAddressingSpec struct {
+ // IPv4 is the IPv4 address of the IPv4 health endpoint.
+ //
+ // +kubebuilder:validation:Optional
+ IPv4 string `json:"ipv4,omitempty"`
+
+ // IPv6 is the IPv6 address of the IPv6 health endpoint.
+ //
+ // +kubebuilder:validation:Optional
+ IPv6 string `json:"ipv6,omitempty"`
+}
+
+// EncryptionSpec defines the encryption relevant configuration of a node.
+type EncryptionSpec struct {
+ // Key is the index to the key to use for encryption or 0 if encryption is
+ // disabled.
+ //
+ // +kubebuilder:validation:Optional
+ Key int `json:"key,omitempty"`
+}
+
+// NodeStatus is the status of a node.
+type NodeStatus struct {
+ // ENI is the AWS ENI specific status of the node.
+ //
+ // +kubebuilder:validation:Optional
+ ENI eniTypes.ENIStatus `json:"eni,omitempty"`
+
+ // Azure is the Azure specific status of the node.
+ //
+ // +kubebuilder:validation:Optional
+ Azure azureTypes.AzureStatus `json:"azure,omitempty"`
+
+ // IPAM is the IPAM status of the node.
+ //
+ // +kubebuilder:validation:Optional
+ IPAM ipamTypes.IPAMStatus `json:"ipam,omitempty"`
+
+ // AlibabaCloud is the AlibabaCloud specific status of the node.
+ //
+ // +kubebuilder:validation:Optional
+ AlibabaCloud alibabaCloudTypes.ENIStatus `json:"alibaba-cloud,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +deepequal-gen=false
+
+// CiliumNodeList is a list of CiliumNode objects.
+type CiliumNodeList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ // Items is a list of CiliumNode
+ Items []CiliumNode `json:"items"`
+}
+
+// InstanceID returns the InstanceID of a CiliumNode.
+func (n *CiliumNode) InstanceID() (instanceID string) {
+ if n != nil {
+ instanceID = n.Spec.InstanceID
+ // OBSOLETE: This fallback can be removed in Cilium 1.9
+ if instanceID == "" {
+ instanceID = n.Spec.ENI.InstanceID
+ }
+ }
+ return
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/zz_generated.deepcopy.go
new file mode 100644
index 000000000..cf85aaf6d
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/zz_generated.deepcopy.go
@@ -0,0 +1,1395 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v2
+
+import (
+ models "github.com/cilium/cilium/api/v1/models"
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
+ api "github.com/cilium/cilium/pkg/policy/api"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumClusterwideEnvoyConfig) DeepCopyInto(out *CiliumClusterwideEnvoyConfig) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumClusterwideEnvoyConfig.
+func (in *CiliumClusterwideEnvoyConfig) DeepCopy() *CiliumClusterwideEnvoyConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumClusterwideEnvoyConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CiliumClusterwideEnvoyConfig) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumClusterwideEnvoyConfigList) DeepCopyInto(out *CiliumClusterwideEnvoyConfigList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]CiliumClusterwideEnvoyConfig, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumClusterwideEnvoyConfigList.
+func (in *CiliumClusterwideEnvoyConfigList) DeepCopy() *CiliumClusterwideEnvoyConfigList {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumClusterwideEnvoyConfigList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CiliumClusterwideEnvoyConfigList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumClusterwideNetworkPolicy) DeepCopyInto(out *CiliumClusterwideNetworkPolicy) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.Spec != nil {
+ in, out := &in.Spec, &out.Spec
+ *out = new(api.Rule)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Specs != nil {
+ in, out := &in.Specs, &out.Specs
+ *out = make(api.Rules, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(api.Rule)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ }
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumClusterwideNetworkPolicy.
+func (in *CiliumClusterwideNetworkPolicy) DeepCopy() *CiliumClusterwideNetworkPolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumClusterwideNetworkPolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CiliumClusterwideNetworkPolicy) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumClusterwideNetworkPolicyList) DeepCopyInto(out *CiliumClusterwideNetworkPolicyList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]CiliumClusterwideNetworkPolicy, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumClusterwideNetworkPolicyList.
+func (in *CiliumClusterwideNetworkPolicyList) DeepCopy() *CiliumClusterwideNetworkPolicyList {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumClusterwideNetworkPolicyList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CiliumClusterwideNetworkPolicyList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumEgressGatewayPolicy) DeepCopyInto(out *CiliumEgressGatewayPolicy) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEgressGatewayPolicy.
+func (in *CiliumEgressGatewayPolicy) DeepCopy() *CiliumEgressGatewayPolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumEgressGatewayPolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CiliumEgressGatewayPolicy) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumEgressGatewayPolicyList) DeepCopyInto(out *CiliumEgressGatewayPolicyList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]CiliumEgressGatewayPolicy, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEgressGatewayPolicyList.
+func (in *CiliumEgressGatewayPolicyList) DeepCopy() *CiliumEgressGatewayPolicyList {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumEgressGatewayPolicyList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CiliumEgressGatewayPolicyList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumEgressGatewayPolicySpec) DeepCopyInto(out *CiliumEgressGatewayPolicySpec) {
+ *out = *in
+ if in.Selectors != nil {
+ in, out := &in.Selectors, &out.Selectors
+ *out = make([]EgressRule, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.DestinationCIDRs != nil {
+ in, out := &in.DestinationCIDRs, &out.DestinationCIDRs
+ *out = make([]IPv4CIDR, len(*in))
+ copy(*out, *in)
+ }
+ if in.ExcludedCIDRs != nil {
+ in, out := &in.ExcludedCIDRs, &out.ExcludedCIDRs
+ *out = make([]IPv4CIDR, len(*in))
+ copy(*out, *in)
+ }
+ if in.EgressGateway != nil {
+ in, out := &in.EgressGateway, &out.EgressGateway
+ *out = new(EgressGateway)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEgressGatewayPolicySpec.
+func (in *CiliumEgressGatewayPolicySpec) DeepCopy() *CiliumEgressGatewayPolicySpec {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumEgressGatewayPolicySpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumEndpoint) DeepCopyInto(out *CiliumEndpoint) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEndpoint.
+func (in *CiliumEndpoint) DeepCopy() *CiliumEndpoint {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumEndpoint)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CiliumEndpoint) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumEndpointList) DeepCopyInto(out *CiliumEndpointList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]CiliumEndpoint, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEndpointList.
+func (in *CiliumEndpointList) DeepCopy() *CiliumEndpointList {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumEndpointList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CiliumEndpointList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumEnvoyConfig) DeepCopyInto(out *CiliumEnvoyConfig) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEnvoyConfig.
+func (in *CiliumEnvoyConfig) DeepCopy() *CiliumEnvoyConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumEnvoyConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CiliumEnvoyConfig) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumEnvoyConfigList) DeepCopyInto(out *CiliumEnvoyConfigList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]CiliumEnvoyConfig, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEnvoyConfigList.
+func (in *CiliumEnvoyConfigList) DeepCopy() *CiliumEnvoyConfigList {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumEnvoyConfigList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CiliumEnvoyConfigList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumEnvoyConfigSpec) DeepCopyInto(out *CiliumEnvoyConfigSpec) {
+ *out = *in
+ if in.Services != nil {
+ in, out := &in.Services, &out.Services
+ *out = make([]*ServiceListener, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(ServiceListener)
+ **out = **in
+ }
+ }
+ }
+ if in.BackendServices != nil {
+ in, out := &in.BackendServices, &out.BackendServices
+ *out = make([]*Service, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(Service)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ }
+ if in.Resources != nil {
+ in, out := &in.Resources, &out.Resources
+ *out = make([]XDSResource, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEnvoyConfigSpec.
+func (in *CiliumEnvoyConfigSpec) DeepCopy() *CiliumEnvoyConfigSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumEnvoyConfigSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumExternalWorkload) DeepCopyInto(out *CiliumExternalWorkload) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumExternalWorkload.
+func (in *CiliumExternalWorkload) DeepCopy() *CiliumExternalWorkload {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumExternalWorkload)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CiliumExternalWorkload) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumExternalWorkloadList) DeepCopyInto(out *CiliumExternalWorkloadList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]CiliumExternalWorkload, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumExternalWorkloadList.
+func (in *CiliumExternalWorkloadList) DeepCopy() *CiliumExternalWorkloadList {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumExternalWorkloadList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CiliumExternalWorkloadList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumExternalWorkloadSpec) DeepCopyInto(out *CiliumExternalWorkloadSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumExternalWorkloadSpec.
+func (in *CiliumExternalWorkloadSpec) DeepCopy() *CiliumExternalWorkloadSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumExternalWorkloadSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumExternalWorkloadStatus) DeepCopyInto(out *CiliumExternalWorkloadStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumExternalWorkloadStatus.
+func (in *CiliumExternalWorkloadStatus) DeepCopy() *CiliumExternalWorkloadStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumExternalWorkloadStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumIdentity) DeepCopyInto(out *CiliumIdentity) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.SecurityLabels != nil {
+ in, out := &in.SecurityLabels, &out.SecurityLabels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumIdentity.
+func (in *CiliumIdentity) DeepCopy() *CiliumIdentity {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumIdentity)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CiliumIdentity) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumIdentityList) DeepCopyInto(out *CiliumIdentityList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]CiliumIdentity, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumIdentityList.
+func (in *CiliumIdentityList) DeepCopy() *CiliumIdentityList {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumIdentityList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CiliumIdentityList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumLocalRedirectPolicy) DeepCopyInto(out *CiliumLocalRedirectPolicy) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumLocalRedirectPolicy.
+func (in *CiliumLocalRedirectPolicy) DeepCopy() *CiliumLocalRedirectPolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumLocalRedirectPolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CiliumLocalRedirectPolicy) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumLocalRedirectPolicyList) DeepCopyInto(out *CiliumLocalRedirectPolicyList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]CiliumLocalRedirectPolicy, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumLocalRedirectPolicyList.
+func (in *CiliumLocalRedirectPolicyList) DeepCopy() *CiliumLocalRedirectPolicyList {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumLocalRedirectPolicyList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CiliumLocalRedirectPolicyList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumLocalRedirectPolicySpec) DeepCopyInto(out *CiliumLocalRedirectPolicySpec) {
+ *out = *in
+ in.RedirectFrontend.DeepCopyInto(&out.RedirectFrontend)
+ in.RedirectBackend.DeepCopyInto(&out.RedirectBackend)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumLocalRedirectPolicySpec.
+func (in *CiliumLocalRedirectPolicySpec) DeepCopy() *CiliumLocalRedirectPolicySpec {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumLocalRedirectPolicySpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumLocalRedirectPolicyStatus) DeepCopyInto(out *CiliumLocalRedirectPolicyStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumLocalRedirectPolicyStatus.
+func (in *CiliumLocalRedirectPolicyStatus) DeepCopy() *CiliumLocalRedirectPolicyStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumLocalRedirectPolicyStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumNetworkPolicy) DeepCopyInto(out *CiliumNetworkPolicy) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.Spec != nil {
+ in, out := &in.Spec, &out.Spec
+ *out = new(api.Rule)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Specs != nil {
+ in, out := &in.Specs, &out.Specs
+ *out = make(api.Rules, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(api.Rule)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ }
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNetworkPolicy.
+func (in *CiliumNetworkPolicy) DeepCopy() *CiliumNetworkPolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumNetworkPolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CiliumNetworkPolicy) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumNetworkPolicyList) DeepCopyInto(out *CiliumNetworkPolicyList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]CiliumNetworkPolicy, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNetworkPolicyList.
+func (in *CiliumNetworkPolicyList) DeepCopy() *CiliumNetworkPolicyList {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumNetworkPolicyList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CiliumNetworkPolicyList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumNetworkPolicyNodeStatus) DeepCopyInto(out *CiliumNetworkPolicyNodeStatus) {
+ *out = *in
+ in.LastUpdated.DeepCopyInto(&out.LastUpdated)
+ if in.Annotations != nil {
+ in, out := &in.Annotations, &out.Annotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNetworkPolicyNodeStatus.
+func (in *CiliumNetworkPolicyNodeStatus) DeepCopy() *CiliumNetworkPolicyNodeStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumNetworkPolicyNodeStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumNetworkPolicyStatus) DeepCopyInto(out *CiliumNetworkPolicyStatus) {
+ *out = *in
+ if in.Nodes != nil {
+ in, out := &in.Nodes, &out.Nodes
+ *out = make(map[string]CiliumNetworkPolicyNodeStatus, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+ if in.DerivativePolicies != nil {
+ in, out := &in.DerivativePolicies, &out.DerivativePolicies
+ *out = make(map[string]CiliumNetworkPolicyNodeStatus, len(*in))
+ for key, val := range *in {
+ (*out)[key] = *val.DeepCopy()
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNetworkPolicyStatus.
+func (in *CiliumNetworkPolicyStatus) DeepCopy() *CiliumNetworkPolicyStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumNetworkPolicyStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumNode) DeepCopyInto(out *CiliumNode) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNode.
+func (in *CiliumNode) DeepCopy() *CiliumNode {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumNode)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CiliumNode) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumNodeList) DeepCopyInto(out *CiliumNodeList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]CiliumNode, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNodeList.
+func (in *CiliumNodeList) DeepCopy() *CiliumNodeList {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumNodeList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CiliumNodeList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ControllerStatus) DeepCopyInto(out *ControllerStatus) {
+ *out = *in
+ if in.Configuration != nil {
+ in, out := &in.Configuration, &out.Configuration
+ *out = new(models.ControllerStatusConfiguration)
+ **out = **in
+ }
+ out.Status = in.Status
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerStatus.
+func (in *ControllerStatus) DeepCopy() *ControllerStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ControllerStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EgressGateway) DeepCopyInto(out *EgressGateway) {
+ *out = *in
+ if in.NodeSelector != nil {
+ in, out := &in.NodeSelector, &out.NodeSelector
+ *out = new(v1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressGateway.
+func (in *EgressGateway) DeepCopy() *EgressGateway {
+ if in == nil {
+ return nil
+ }
+ out := new(EgressGateway)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EgressRule) DeepCopyInto(out *EgressRule) {
+ *out = *in
+ if in.NamespaceSelector != nil {
+ in, out := &in.NamespaceSelector, &out.NamespaceSelector
+ *out = new(v1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.PodSelector != nil {
+ in, out := &in.PodSelector, &out.PodSelector
+ *out = new(v1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressRule.
+func (in *EgressRule) DeepCopy() *EgressRule {
+ if in == nil {
+ return nil
+ }
+ out := new(EgressRule)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EncryptionSpec) DeepCopyInto(out *EncryptionSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionSpec.
+func (in *EncryptionSpec) DeepCopy() *EncryptionSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(EncryptionSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EndpointIdentity) DeepCopyInto(out *EndpointIdentity) {
+ *out = *in
+ if in.Labels != nil {
+ in, out := &in.Labels, &out.Labels
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointIdentity.
+func (in *EndpointIdentity) DeepCopy() *EndpointIdentity {
+ if in == nil {
+ return nil
+ }
+ out := new(EndpointIdentity)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EndpointNetworking) DeepCopyInto(out *EndpointNetworking) {
+ *out = *in
+ if in.Addressing != nil {
+ in, out := &in.Addressing, &out.Addressing
+ *out = make(AddressPairList, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(AddressPair)
+ **out = **in
+ }
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointNetworking.
+func (in *EndpointNetworking) DeepCopy() *EndpointNetworking {
+ if in == nil {
+ return nil
+ }
+ out := new(EndpointNetworking)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EndpointPolicy) DeepCopyInto(out *EndpointPolicy) {
+ *out = *in
+ if in.Ingress != nil {
+ in, out := &in.Ingress, &out.Ingress
+ *out = new(EndpointPolicyDirection)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Egress != nil {
+ in, out := &in.Egress, &out.Egress
+ *out = new(EndpointPolicyDirection)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPolicy.
+func (in *EndpointPolicy) DeepCopy() *EndpointPolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(EndpointPolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EndpointPolicyDirection) DeepCopyInto(out *EndpointPolicyDirection) {
+ *out = *in
+ if in.Allowed != nil {
+ in, out := &in.Allowed, &out.Allowed
+ *out = make(AllowedIdentityList, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Denied != nil {
+ in, out := &in.Denied, &out.Denied
+ *out = make(DenyIdentityList, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Removing != nil {
+ in, out := &in.Removing, &out.Removing
+ *out = make(AllowedIdentityList, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Adding != nil {
+ in, out := &in.Adding, &out.Adding
+ *out = make(AllowedIdentityList, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPolicyDirection.
+func (in *EndpointPolicyDirection) DeepCopy() *EndpointPolicyDirection {
+ if in == nil {
+ return nil
+ }
+ out := new(EndpointPolicyDirection)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EndpointStatus) DeepCopyInto(out *EndpointStatus) {
+ *out = *in
+ if in.Controllers != nil {
+ in, out := &in.Controllers, &out.Controllers
+ *out = make(ControllerList, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ExternalIdentifiers != nil {
+ in, out := &in.ExternalIdentifiers, &out.ExternalIdentifiers
+ *out = new(models.EndpointIdentifiers)
+ **out = **in
+ }
+ if in.Health != nil {
+ in, out := &in.Health, &out.Health
+ *out = new(models.EndpointHealth)
+ **out = **in
+ }
+ if in.Identity != nil {
+ in, out := &in.Identity, &out.Identity
+ *out = new(EndpointIdentity)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Log != nil {
+ in, out := &in.Log, &out.Log
+ *out = make([]*models.EndpointStatusChange, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(models.EndpointStatusChange)
+ **out = **in
+ }
+ }
+ }
+ if in.Networking != nil {
+ in, out := &in.Networking, &out.Networking
+ *out = new(EndpointNetworking)
+ (*in).DeepCopyInto(*out)
+ }
+ out.Encryption = in.Encryption
+ if in.Policy != nil {
+ in, out := &in.Policy, &out.Policy
+ *out = new(EndpointPolicy)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.VisibilityPolicyStatus != nil {
+ in, out := &in.VisibilityPolicyStatus, &out.VisibilityPolicyStatus
+ *out = new(string)
+ **out = **in
+ }
+ if in.NamedPorts != nil {
+ in, out := &in.NamedPorts, &out.NamedPorts
+ *out = make(models.NamedPorts, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(models.Port)
+ **out = **in
+ }
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointStatus.
+func (in *EndpointStatus) DeepCopy() *EndpointStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(EndpointStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Frontend) DeepCopyInto(out *Frontend) {
+ *out = *in
+ if in.ToPorts != nil {
+ in, out := &in.ToPorts, &out.ToPorts
+ *out = make([]PortInfo, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Frontend.
+func (in *Frontend) DeepCopy() *Frontend {
+ if in == nil {
+ return nil
+ }
+ out := new(Frontend)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HealthAddressingSpec) DeepCopyInto(out *HealthAddressingSpec) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthAddressingSpec.
+func (in *HealthAddressingSpec) DeepCopy() *HealthAddressingSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(HealthAddressingSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IdentityTuple) DeepCopyInto(out *IdentityTuple) {
+ *out = *in
+ if in.IdentityLabels != nil {
+ in, out := &in.IdentityLabels, &out.IdentityLabels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityTuple.
+func (in *IdentityTuple) DeepCopy() *IdentityTuple {
+ if in == nil {
+ return nil
+ }
+ out := new(IdentityTuple)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeAddress) DeepCopyInto(out *NodeAddress) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAddress.
+func (in *NodeAddress) DeepCopy() *NodeAddress {
+ if in == nil {
+ return nil
+ }
+ out := new(NodeAddress)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeSpec) DeepCopyInto(out *NodeSpec) {
+ *out = *in
+ if in.Addresses != nil {
+ in, out := &in.Addresses, &out.Addresses
+ *out = make([]NodeAddress, len(*in))
+ copy(*out, *in)
+ }
+ out.HealthAddressing = in.HealthAddressing
+ out.IngressAddressing = in.IngressAddressing
+ out.Encryption = in.Encryption
+ in.ENI.DeepCopyInto(&out.ENI)
+ out.Azure = in.Azure
+ in.AlibabaCloud.DeepCopyInto(&out.AlibabaCloud)
+ in.IPAM.DeepCopyInto(&out.IPAM)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSpec.
+func (in *NodeSpec) DeepCopy() *NodeSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(NodeSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeStatus) DeepCopyInto(out *NodeStatus) {
+ *out = *in
+ in.ENI.DeepCopyInto(&out.ENI)
+ in.Azure.DeepCopyInto(&out.Azure)
+ in.IPAM.DeepCopyInto(&out.IPAM)
+ in.AlibabaCloud.DeepCopyInto(&out.AlibabaCloud)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus.
+func (in *NodeStatus) DeepCopy() *NodeStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(NodeStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PortInfo) DeepCopyInto(out *PortInfo) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortInfo.
+func (in *PortInfo) DeepCopy() *PortInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(PortInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RedirectBackend) DeepCopyInto(out *RedirectBackend) {
+ *out = *in
+ in.LocalEndpointSelector.DeepCopyInto(&out.LocalEndpointSelector)
+ if in.ToPorts != nil {
+ in, out := &in.ToPorts, &out.ToPorts
+ *out = make([]PortInfo, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedirectBackend.
+func (in *RedirectBackend) DeepCopy() *RedirectBackend {
+ if in == nil {
+ return nil
+ }
+ out := new(RedirectBackend)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RedirectFrontend) DeepCopyInto(out *RedirectFrontend) {
+ *out = *in
+ if in.AddressMatcher != nil {
+ in, out := &in.AddressMatcher, &out.AddressMatcher
+ *out = new(Frontend)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ServiceMatcher != nil {
+ in, out := &in.ServiceMatcher, &out.ServiceMatcher
+ *out = new(ServiceInfo)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedirectFrontend.
+func (in *RedirectFrontend) DeepCopy() *RedirectFrontend {
+ if in == nil {
+ return nil
+ }
+ out := new(RedirectFrontend)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Service) DeepCopyInto(out *Service) {
+ *out = *in
+ if in.Ports != nil {
+ in, out := &in.Ports, &out.Ports
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service.
+func (in *Service) DeepCopy() *Service {
+ if in == nil {
+ return nil
+ }
+ out := new(Service)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceInfo) DeepCopyInto(out *ServiceInfo) {
+ *out = *in
+ if in.ToPorts != nil {
+ in, out := &in.ToPorts, &out.ToPorts
+ *out = make([]PortInfo, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceInfo.
+func (in *ServiceInfo) DeepCopy() *ServiceInfo {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceInfo)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceListener) DeepCopyInto(out *ServiceListener) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceListener.
+func (in *ServiceListener) DeepCopy() *ServiceListener {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceListener)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new XDSResource.
+func (in *XDSResource) DeepCopy() *XDSResource {
+ if in == nil {
+ return nil
+ }
+ out := new(XDSResource)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/zz_generated.deepequal.go
new file mode 100644
index 000000000..7c2a85522
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/zz_generated.deepequal.go
@@ -0,0 +1,1312 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by deepequal-gen. DO NOT EDIT.
+
+package v2
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *AddressPair) DeepEqual(other *AddressPair) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.IPV4 != other.IPV4 {
+ return false
+ }
+ if in.IPV6 != other.IPV6 {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *AddressPairList) DeepEqual(other *AddressPairList) bool {
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual((*other)[i]) {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *AllowedIdentityList) DeepEqual(other *AllowedIdentityList) bool {
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CiliumClusterwideEnvoyConfig) DeepEqual(other *CiliumClusterwideEnvoyConfig) bool {
+ if other == nil {
+ return false
+ }
+
+ if !in.Spec.DeepEqual(&other.Spec) {
+ return false
+ }
+
+ return true
+}
+
+// deepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CiliumClusterwideNetworkPolicy) deepEqual(other *CiliumClusterwideNetworkPolicy) bool {
+ if other == nil {
+ return false
+ }
+
+ if (in.Spec == nil) != (other.Spec == nil) {
+ return false
+ } else if in.Spec != nil {
+ if !in.Spec.DeepEqual(other.Spec) {
+ return false
+ }
+ }
+
+ if ((in.Specs != nil) && (other.Specs != nil)) || ((in.Specs == nil) != (other.Specs == nil)) {
+ in, other := &in.Specs, &other.Specs
+ if other == nil || !in.DeepEqual(other) {
+ return false
+ }
+ }
+
+ if !in.Status.DeepEqual(&other.Status) {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CiliumEgressGatewayPolicy) DeepEqual(other *CiliumEgressGatewayPolicy) bool {
+ if other == nil {
+ return false
+ }
+
+ if !in.Spec.DeepEqual(&other.Spec) {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CiliumEgressGatewayPolicySpec) DeepEqual(other *CiliumEgressGatewayPolicySpec) bool {
+ if other == nil {
+ return false
+ }
+
+ if ((in.Selectors != nil) && (other.Selectors != nil)) || ((in.Selectors == nil) != (other.Selectors == nil)) {
+ in, other := &in.Selectors, &other.Selectors
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ if ((in.DestinationCIDRs != nil) && (other.DestinationCIDRs != nil)) || ((in.DestinationCIDRs == nil) != (other.DestinationCIDRs == nil)) {
+ in, other := &in.DestinationCIDRs, &other.DestinationCIDRs
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if inElement != (*other)[i] {
+ return false
+ }
+ }
+ }
+ }
+
+ if ((in.ExcludedCIDRs != nil) && (other.ExcludedCIDRs != nil)) || ((in.ExcludedCIDRs == nil) != (other.ExcludedCIDRs == nil)) {
+ in, other := &in.ExcludedCIDRs, &other.ExcludedCIDRs
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if inElement != (*other)[i] {
+ return false
+ }
+ }
+ }
+ }
+
+ if (in.EgressGateway == nil) != (other.EgressGateway == nil) {
+ return false
+ } else if in.EgressGateway != nil {
+ if !in.EgressGateway.DeepEqual(other.EgressGateway) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CiliumEndpoint) DeepEqual(other *CiliumEndpoint) bool {
+ if other == nil {
+ return false
+ }
+
+ if !in.Status.DeepEqual(&other.Status) {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CiliumEnvoyConfig) DeepEqual(other *CiliumEnvoyConfig) bool {
+ if other == nil {
+ return false
+ }
+
+ if !in.Spec.DeepEqual(&other.Spec) {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CiliumEnvoyConfigSpec) DeepEqual(other *CiliumEnvoyConfigSpec) bool {
+ if other == nil {
+ return false
+ }
+
+ if ((in.Services != nil) && (other.Services != nil)) || ((in.Services == nil) != (other.Services == nil)) {
+ in, other := &in.Services, &other.Services
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual((*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ if ((in.BackendServices != nil) && (other.BackendServices != nil)) || ((in.BackendServices == nil) != (other.BackendServices == nil)) {
+ in, other := &in.BackendServices, &other.BackendServices
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual((*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ if ((in.Resources != nil) && (other.Resources != nil)) || ((in.Resources == nil) != (other.Resources == nil)) {
+ in, other := &in.Resources, &other.Resources
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CiliumExternalWorkload) DeepEqual(other *CiliumExternalWorkload) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Spec != other.Spec {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CiliumExternalWorkloadSpec) DeepEqual(other *CiliumExternalWorkloadSpec) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.IPv4AllocCIDR != other.IPv4AllocCIDR {
+ return false
+ }
+ if in.IPv6AllocCIDR != other.IPv6AllocCIDR {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CiliumExternalWorkloadStatus) DeepEqual(other *CiliumExternalWorkloadStatus) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.ID != other.ID {
+ return false
+ }
+ if in.IP != other.IP {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CiliumIdentity) DeepEqual(other *CiliumIdentity) bool {
+ if other == nil {
+ return false
+ }
+
+ if ((in.SecurityLabels != nil) && (other.SecurityLabels != nil)) || ((in.SecurityLabels == nil) != (other.SecurityLabels == nil)) {
+ in, other := &in.SecurityLabels, &other.SecurityLabels
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for key, inValue := range *in {
+ if otherValue, present := (*other)[key]; !present {
+ return false
+ } else {
+ if inValue != otherValue {
+ return false
+ }
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CiliumLocalRedirectPolicy) DeepEqual(other *CiliumLocalRedirectPolicy) bool {
+ if other == nil {
+ return false
+ }
+
+ if !in.Spec.DeepEqual(&other.Spec) {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CiliumLocalRedirectPolicySpec) DeepEqual(other *CiliumLocalRedirectPolicySpec) bool {
+ if other == nil {
+ return false
+ }
+
+ if !in.RedirectFrontend.DeepEqual(&other.RedirectFrontend) {
+ return false
+ }
+
+ if !in.RedirectBackend.DeepEqual(&other.RedirectBackend) {
+ return false
+ }
+
+ if in.Description != other.Description {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CiliumLocalRedirectPolicyStatus) DeepEqual(other *CiliumLocalRedirectPolicyStatus) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.OK != other.OK {
+ return false
+ }
+
+ return true
+}
+
+// deepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CiliumNetworkPolicy) deepEqual(other *CiliumNetworkPolicy) bool {
+ if other == nil {
+ return false
+ }
+
+ if (in.Spec == nil) != (other.Spec == nil) {
+ return false
+ } else if in.Spec != nil {
+ if !in.Spec.DeepEqual(other.Spec) {
+ return false
+ }
+ }
+
+ if ((in.Specs != nil) && (other.Specs != nil)) || ((in.Specs == nil) != (other.Specs == nil)) {
+ in, other := &in.Specs, &other.Specs
+ if other == nil || !in.DeepEqual(other) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CiliumNetworkPolicyNodeStatus) DeepEqual(other *CiliumNetworkPolicyNodeStatus) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.OK != other.OK {
+ return false
+ }
+ if in.Error != other.Error {
+ return false
+ }
+ if !in.LastUpdated.DeepEqual(&other.LastUpdated) {
+ return false
+ }
+
+ if in.Revision != other.Revision {
+ return false
+ }
+ if in.Enforcing != other.Enforcing {
+ return false
+ }
+ if ((in.Annotations != nil) && (other.Annotations != nil)) || ((in.Annotations == nil) != (other.Annotations == nil)) {
+ in, other := &in.Annotations, &other.Annotations
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for key, inValue := range *in {
+ if otherValue, present := (*other)[key]; !present {
+ return false
+ } else {
+ if inValue != otherValue {
+ return false
+ }
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CiliumNetworkPolicyStatus) DeepEqual(other *CiliumNetworkPolicyStatus) bool {
+ if other == nil {
+ return false
+ }
+
+ if ((in.Nodes != nil) && (other.Nodes != nil)) || ((in.Nodes == nil) != (other.Nodes == nil)) {
+ in, other := &in.Nodes, &other.Nodes
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for key, inValue := range *in {
+ if otherValue, present := (*other)[key]; !present {
+ return false
+ } else {
+ if !inValue.DeepEqual(&otherValue) {
+ return false
+ }
+ }
+ }
+ }
+ }
+
+ if ((in.DerivativePolicies != nil) && (other.DerivativePolicies != nil)) || ((in.DerivativePolicies == nil) != (other.DerivativePolicies == nil)) {
+ in, other := &in.DerivativePolicies, &other.DerivativePolicies
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for key, inValue := range *in {
+ if otherValue, present := (*other)[key]; !present {
+ return false
+ } else {
+ if !inValue.DeepEqual(&otherValue) {
+ return false
+ }
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CiliumNode) DeepEqual(other *CiliumNode) bool {
+ if other == nil {
+ return false
+ }
+
+ if !in.Spec.DeepEqual(&other.Spec) {
+ return false
+ }
+
+ if !in.Status.DeepEqual(&other.Status) {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *ControllerList) DeepEqual(other *ControllerList) bool {
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *ControllerStatus) DeepEqual(other *ControllerStatus) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Name != other.Name {
+ return false
+ }
+ if (in.Configuration == nil) != (other.Configuration == nil) {
+ return false
+ } else if in.Configuration != nil {
+ if !in.Configuration.DeepEqual(other.Configuration) {
+ return false
+ }
+ }
+
+ if in.Status != other.Status {
+ return false
+ }
+
+ if in.UUID != other.UUID {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *ControllerStatusStatus) DeepEqual(other *ControllerStatusStatus) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.ConsecutiveFailureCount != other.ConsecutiveFailureCount {
+ return false
+ }
+ if in.FailureCount != other.FailureCount {
+ return false
+ }
+ if in.LastFailureMsg != other.LastFailureMsg {
+ return false
+ }
+ if in.LastFailureTimestamp != other.LastFailureTimestamp {
+ return false
+ }
+ if in.LastSuccessTimestamp != other.LastSuccessTimestamp {
+ return false
+ }
+ if in.SuccessCount != other.SuccessCount {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *DenyIdentityList) DeepEqual(other *DenyIdentityList) bool {
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *EgressGateway) DeepEqual(other *EgressGateway) bool {
+ if other == nil {
+ return false
+ }
+
+ if (in.NodeSelector == nil) != (other.NodeSelector == nil) {
+ return false
+ } else if in.NodeSelector != nil {
+ if !in.NodeSelector.DeepEqual(other.NodeSelector) {
+ return false
+ }
+ }
+
+ if in.Interface != other.Interface {
+ return false
+ }
+ if in.EgressIP != other.EgressIP {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *EgressRule) DeepEqual(other *EgressRule) bool {
+ if other == nil {
+ return false
+ }
+
+ if (in.NamespaceSelector == nil) != (other.NamespaceSelector == nil) {
+ return false
+ } else if in.NamespaceSelector != nil {
+ if !in.NamespaceSelector.DeepEqual(other.NamespaceSelector) {
+ return false
+ }
+ }
+
+ if (in.PodSelector == nil) != (other.PodSelector == nil) {
+ return false
+ } else if in.PodSelector != nil {
+ if !in.PodSelector.DeepEqual(other.PodSelector) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *EncryptionSpec) DeepEqual(other *EncryptionSpec) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Key != other.Key {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *EndpointIdentity) DeepEqual(other *EndpointIdentity) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.ID != other.ID {
+ return false
+ }
+ if ((in.Labels != nil) && (other.Labels != nil)) || ((in.Labels == nil) != (other.Labels == nil)) {
+ in, other := &in.Labels, &other.Labels
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if inElement != (*other)[i] {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *EndpointNetworking) DeepEqual(other *EndpointNetworking) bool {
+ if other == nil {
+ return false
+ }
+
+ if ((in.Addressing != nil) && (other.Addressing != nil)) || ((in.Addressing == nil) != (other.Addressing == nil)) {
+ in, other := &in.Addressing, &other.Addressing
+ if other == nil || !in.DeepEqual(other) {
+ return false
+ }
+ }
+
+ if in.NodeIP != other.NodeIP {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *EndpointPolicy) DeepEqual(other *EndpointPolicy) bool {
+ if other == nil {
+ return false
+ }
+
+ if (in.Ingress == nil) != (other.Ingress == nil) {
+ return false
+ } else if in.Ingress != nil {
+ if !in.Ingress.DeepEqual(other.Ingress) {
+ return false
+ }
+ }
+
+ if (in.Egress == nil) != (other.Egress == nil) {
+ return false
+ } else if in.Egress != nil {
+ if !in.Egress.DeepEqual(other.Egress) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *EndpointPolicyDirection) DeepEqual(other *EndpointPolicyDirection) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Enforcing != other.Enforcing {
+ return false
+ }
+ if ((in.Allowed != nil) && (other.Allowed != nil)) || ((in.Allowed == nil) != (other.Allowed == nil)) {
+ in, other := &in.Allowed, &other.Allowed
+ if other == nil || !in.DeepEqual(other) {
+ return false
+ }
+ }
+
+ if ((in.Denied != nil) && (other.Denied != nil)) || ((in.Denied == nil) != (other.Denied == nil)) {
+ in, other := &in.Denied, &other.Denied
+ if other == nil || !in.DeepEqual(other) {
+ return false
+ }
+ }
+
+ if ((in.Removing != nil) && (other.Removing != nil)) || ((in.Removing == nil) != (other.Removing == nil)) {
+ in, other := &in.Removing, &other.Removing
+ if other == nil || !in.DeepEqual(other) {
+ return false
+ }
+ }
+
+ if ((in.Adding != nil) && (other.Adding != nil)) || ((in.Adding == nil) != (other.Adding == nil)) {
+ in, other := &in.Adding, &other.Adding
+ if other == nil || !in.DeepEqual(other) {
+ return false
+ }
+ }
+
+ if in.State != other.State {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *EndpointStatus) DeepEqual(other *EndpointStatus) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.ID != other.ID {
+ return false
+ }
+ if ((in.Controllers != nil) && (other.Controllers != nil)) || ((in.Controllers == nil) != (other.Controllers == nil)) {
+ in, other := &in.Controllers, &other.Controllers
+ if other == nil || !in.DeepEqual(other) {
+ return false
+ }
+ }
+
+ if (in.ExternalIdentifiers == nil) != (other.ExternalIdentifiers == nil) {
+ return false
+ } else if in.ExternalIdentifiers != nil {
+ if !in.ExternalIdentifiers.DeepEqual(other.ExternalIdentifiers) {
+ return false
+ }
+ }
+
+ if (in.Health == nil) != (other.Health == nil) {
+ return false
+ } else if in.Health != nil {
+ if !in.Health.DeepEqual(other.Health) {
+ return false
+ }
+ }
+
+ if (in.Identity == nil) != (other.Identity == nil) {
+ return false
+ } else if in.Identity != nil {
+ if !in.Identity.DeepEqual(other.Identity) {
+ return false
+ }
+ }
+
+ if ((in.Log != nil) && (other.Log != nil)) || ((in.Log == nil) != (other.Log == nil)) {
+ in, other := &in.Log, &other.Log
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual((*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ if (in.Networking == nil) != (other.Networking == nil) {
+ return false
+ } else if in.Networking != nil {
+ if !in.Networking.DeepEqual(other.Networking) {
+ return false
+ }
+ }
+
+ if in.Encryption != other.Encryption {
+ return false
+ }
+
+ if (in.Policy == nil) != (other.Policy == nil) {
+ return false
+ } else if in.Policy != nil {
+ if !in.Policy.DeepEqual(other.Policy) {
+ return false
+ }
+ }
+
+ if (in.VisibilityPolicyStatus == nil) != (other.VisibilityPolicyStatus == nil) {
+ return false
+ } else if in.VisibilityPolicyStatus != nil {
+ if *in.VisibilityPolicyStatus != *other.VisibilityPolicyStatus {
+ return false
+ }
+ }
+
+ if in.State != other.State {
+ return false
+ }
+ if ((in.NamedPorts != nil) && (other.NamedPorts != nil)) || ((in.NamedPorts == nil) != (other.NamedPorts == nil)) {
+ in, other := &in.NamedPorts, &other.NamedPorts
+ if other == nil || !in.DeepEqual(other) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *Frontend) DeepEqual(other *Frontend) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.IP != other.IP {
+ return false
+ }
+ if ((in.ToPorts != nil) && (other.ToPorts != nil)) || ((in.ToPorts == nil) != (other.ToPorts == nil)) {
+ in, other := &in.ToPorts, &other.ToPorts
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *HealthAddressingSpec) DeepEqual(other *HealthAddressingSpec) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.IPv4 != other.IPv4 {
+ return false
+ }
+ if in.IPv6 != other.IPv6 {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *IdentityList) DeepEqual(other *IdentityList) bool {
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *IdentityTuple) DeepEqual(other *IdentityTuple) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Identity != other.Identity {
+ return false
+ }
+ if ((in.IdentityLabels != nil) && (other.IdentityLabels != nil)) || ((in.IdentityLabels == nil) != (other.IdentityLabels == nil)) {
+ in, other := &in.IdentityLabels, &other.IdentityLabels
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for key, inValue := range *in {
+ if otherValue, present := (*other)[key]; !present {
+ return false
+ } else {
+ if inValue != otherValue {
+ return false
+ }
+ }
+ }
+ }
+ }
+
+ if in.DestPort != other.DestPort {
+ return false
+ }
+ if in.Protocol != other.Protocol {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *NodeAddress) DeepEqual(other *NodeAddress) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Type != other.Type {
+ return false
+ }
+ if in.IP != other.IP {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *NodeSpec) DeepEqual(other *NodeSpec) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.InstanceID != other.InstanceID {
+ return false
+ }
+ if ((in.Addresses != nil) && (other.Addresses != nil)) || ((in.Addresses == nil) != (other.Addresses == nil)) {
+ in, other := &in.Addresses, &other.Addresses
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ if in.HealthAddressing != other.HealthAddressing {
+ return false
+ }
+
+ if in.IngressAddressing != other.IngressAddressing {
+ return false
+ }
+
+ if in.Encryption != other.Encryption {
+ return false
+ }
+
+ if !in.ENI.DeepEqual(&other.ENI) {
+ return false
+ }
+
+ if in.Azure != other.Azure {
+ return false
+ }
+
+ if !in.AlibabaCloud.DeepEqual(&other.AlibabaCloud) {
+ return false
+ }
+
+ if !in.IPAM.DeepEqual(&other.IPAM) {
+ return false
+ }
+
+ if in.NodeIdentity != other.NodeIdentity {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *NodeStatus) DeepEqual(other *NodeStatus) bool {
+ if other == nil {
+ return false
+ }
+
+ if !in.ENI.DeepEqual(&other.ENI) {
+ return false
+ }
+
+ if !in.Azure.DeepEqual(&other.Azure) {
+ return false
+ }
+
+ if !in.IPAM.DeepEqual(&other.IPAM) {
+ return false
+ }
+
+ if !in.AlibabaCloud.DeepEqual(&other.AlibabaCloud) {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *PortInfo) DeepEqual(other *PortInfo) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Port != other.Port {
+ return false
+ }
+ if in.Protocol != other.Protocol {
+ return false
+ }
+ if in.Name != other.Name {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *RedirectBackend) DeepEqual(other *RedirectBackend) bool {
+ if other == nil {
+ return false
+ }
+
+ if !in.LocalEndpointSelector.DeepEqual(&other.LocalEndpointSelector) {
+ return false
+ }
+
+ if ((in.ToPorts != nil) && (other.ToPorts != nil)) || ((in.ToPorts == nil) != (other.ToPorts == nil)) {
+ in, other := &in.ToPorts, &other.ToPorts
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *RedirectFrontend) DeepEqual(other *RedirectFrontend) bool {
+ if other == nil {
+ return false
+ }
+
+ if (in.AddressMatcher == nil) != (other.AddressMatcher == nil) {
+ return false
+ } else if in.AddressMatcher != nil {
+ if !in.AddressMatcher.DeepEqual(other.AddressMatcher) {
+ return false
+ }
+ }
+
+ if (in.ServiceMatcher == nil) != (other.ServiceMatcher == nil) {
+ return false
+ } else if in.ServiceMatcher != nil {
+ if !in.ServiceMatcher.DeepEqual(other.ServiceMatcher) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *Service) DeepEqual(other *Service) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Name != other.Name {
+ return false
+ }
+ if in.Namespace != other.Namespace {
+ return false
+ }
+ if ((in.Ports != nil) && (other.Ports != nil)) || ((in.Ports == nil) != (other.Ports == nil)) {
+ in, other := &in.Ports, &other.Ports
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if inElement != (*other)[i] {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *ServiceInfo) DeepEqual(other *ServiceInfo) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Name != other.Name {
+ return false
+ }
+ if in.Namespace != other.Namespace {
+ return false
+ }
+ if ((in.ToPorts != nil) && (other.ToPorts != nil)) || ((in.ToPorts == nil) != (other.ToPorts == nil)) {
+ in, other := &in.ToPorts, &other.ToPorts
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *ServiceListener) DeepEqual(other *ServiceListener) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Name != other.Name {
+ return false
+ }
+ if in.Namespace != other.Namespace {
+ return false
+ }
+ if in.Listener != other.Listener {
+ return false
+ }
+
+ return true
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgpp_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgpp_types.go
new file mode 100644
index 000000000..2fc8e841b
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgpp_types.go
@@ -0,0 +1,372 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package v2alpha1
+
+import (
+ "fmt"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/utils/pointer"
+
+ slimv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
+)
+
+const (
+ // DefaultBGPExportPodCIDR defines the default value for ExportPodCIDR determining whether to export the Node's private CIDR block.
+ DefaultBGPExportPodCIDR = false
+ // DefaultBGPPeerPort defines the TCP port number of a CiliumBGPNeighbor when PeerPort is unspecified.
+ DefaultBGPPeerPort = 179
+ // DefaultBGPEBGPMultihopTTL defines the default value for the TTL value used in BGP packets sent to the eBGP neighbors.
+ DefaultBGPEBGPMultihopTTL = 1
+ // DefaultBGPConnectRetryTimeSeconds defines the default initial value for the BGP ConnectRetryTimer (RFC 4271, Section 8).
+ DefaultBGPConnectRetryTimeSeconds = 120
+ // DefaultBGPHoldTimeSeconds defines the default initial value for the BGP HoldTimer (RFC 4271, Section 4.2).
+ DefaultBGPHoldTimeSeconds = 90
+ // DefaultBGPKeepAliveTimeSeconds defines the default initial value for the BGP KeepaliveTimer (RFC 4271, Section 8).
+ DefaultBGPKeepAliveTimeSeconds = 30
+ // DefaultBGPGRRestartTimeSeconds defines default Restart Time for graceful restart (RFC 4724, section 4.2)
+ DefaultBGPGRRestartTimeSeconds = 120
+ // BGPLoadBalancerClass defines the BGP Control Plane load balancer class for Services.
+ BGPLoadBalancerClass = "io.cilium/bgp-control-plane"
+ // PodCIDRSelectorName defines the name for a selector matching Pod CIDRs
+ // (standard cluster scope / Kubernetes IPAM CIDRs, not Multi-Pool IPAM CIDRs).
+ PodCIDRSelectorName = "PodCIDR"
+ // CiliumLoadBalancerIPPoolSelectorName defines the name for a selector matching CiliumLoadBalancerIPPool resources.
+ CiliumLoadBalancerIPPoolSelectorName = "CiliumLoadBalancerIPPool"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:resource:categories={cilium,ciliumbgp},singular="ciliumbgppeeringpolicy",path="ciliumbgppeeringpolicies",scope="Cluster",shortName={bgpp}
+// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name="Age",type=date
+// +kubebuilder:storageversion
+
+// CiliumBGPPeeringPolicy is a Kubernetes third-party resource for instructing
+// Cilium's BGP control plane to create virtual BGP routers.
+type CiliumBGPPeeringPolicy struct {
+ // +deepequal-gen=false
+ metav1.TypeMeta `json:",inline"`
+ // +deepequal-gen=false
+ metav1.ObjectMeta `json:"metadata"`
+
+ // Spec is a human readable description of a BGP peering policy
+ //
+ // +kubebuilder:validation:Optional
+ Spec CiliumBGPPeeringPolicySpec `json:"spec,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:openapi-gen=false
+// +deepequal-gen=false
+
+// CiliumBGPPeeringPolicyList is a list of
+// CiliumBGPPeeringPolicy objects.
+type CiliumBGPPeeringPolicyList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ // Items is a list of CiliumBGPPeeringPolicies.
+ Items []CiliumBGPPeeringPolicy `json:"items"`
+}
+
+// CiliumBGPPeeringPolicySpec specifies one or more CiliumBGPVirtualRouter(s)
+// to apply to nodes matching it's label selector.
+type CiliumBGPPeeringPolicySpec struct {
+ // NodeSelector selects a group of nodes where this BGP Peering
+ // Policy applies.
+ //
+ // If empty / nil this policy applies to all nodes.
+ //
+ // +kubebuilder:validation:Optional
+ NodeSelector *slimv1.LabelSelector `json:"nodeSelector,omitempty"`
+ // A list of CiliumBGPVirtualRouter(s) which instructs
+ // the BGP control plane how to instantiate virtual BGP routers.
+ //
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinItems=1
+ VirtualRouters []CiliumBGPVirtualRouter `json:"virtualRouters"`
+}
+
+type CiliumBGPNeighborGracefulRestart struct {
+ // Enabled flag, when set enables graceful restart capability.
+ //
+ // +kubebuilder:validation:Required
+ Enabled bool `json:"enabled"`
+ // RestartTimeSeconds is the estimated time it will take for the BGP
+ // session to be re-established with peer after a restart.
+ // After this period, peer will remove stale routes. This is
+ // described RFC 4724 section 4.2.
+ //
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Maximum=4095
+ // +kubebuilder:default=120
+ RestartTimeSeconds *int32 `json:"restartTimeSeconds,omitempty"`
+}
+
+// BGPStandardCommunity type represents a value of the "standard" 32-bit BGP Communities Attribute (RFC 1997)
+// as a 4-byte decimal number or two 2-byte decimal numbers separated by a colon.
+// +kubebuilder:validation:Pattern=`^([0-9]|[1-9][0-9]{1,8}|[1-3][0-9]{9}|4[01][0-9]{8}|42[0-8][0-9]{7}|429[0-3][0-9]{6}|4294[0-8][0-9]{5}|42949[0-5][0-9]{4}|429496[0-6][0-9]{3}|4294967[01][0-9]{2}|42949672[0-8][0-9]|429496729[0-5])$|^([0-9]|[1-9][0-9]{1,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5]):([0-9]|[1-9][0-9]{1,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$`
+type BGPStandardCommunity string
+
+// BGPLargeCommunity type represents a value of the BGP Large Communities Attribute (RFC 8092),
+// as three 4-byte decimal numbers separated by colons.
+// +kubebuilder:validation:Pattern=`^([0-9]|[1-9][0-9]{1,8}|[1-3][0-9]{9}|4[01][0-9]{8}|42[0-8][0-9]{7}|429[0-3][0-9]{6}|4294[0-8][0-9]{5}|42949[0-5][0-9]{4}|429496[0-6][0-9]{3}|4294967[01][0-9]{2}|42949672[0-8][0-9]|429496729[0-5]):([0-9]|[1-9][0-9]{1,8}|[1-3][0-9]{9}|4[01][0-9]{8}|42[0-8][0-9]{7}|429[0-3][0-9]{6}|4294[0-8][0-9]{5}|42949[0-5][0-9]{4}|429496[0-6][0-9]{3}|4294967[01][0-9]{2}|42949672[0-8][0-9]|429496729[0-5]):([0-9]|[1-9][0-9]{1,8}|[1-3][0-9]{9}|4[01][0-9]{8}|42[0-8][0-9]{7}|429[0-3][0-9]{6}|4294[0-8][0-9]{5}|42949[0-5][0-9]{4}|429496[0-6][0-9]{3}|4294967[01][0-9]{2}|42949672[0-8][0-9]|429496729[0-5])$`
+type BGPLargeCommunity string
+
+// BGPCommunities holds community values of the supported BGP community path attributes.
+type BGPCommunities struct {
+ // Standard holds a list of "standard" 32-bit BGP Communities Attribute (RFC 1997) values.
+ //
+ // +kubebuilder:validation:Optional
+ Standard []BGPStandardCommunity `json:"standard,omitempty"`
+
+ // Large holds a list of the BGP Large Communities Attribute (RFC 8092) values.
+ //
+ // +kubebuilder:validation:Optional
+ Large []BGPLargeCommunity `json:"large,omitempty"`
+}
+
+// CiliumBGPPathAttributes can be used to apply additional path attributes
+// to matched routes when advertising them to a BGP peer.
+type CiliumBGPPathAttributes struct {
+ // SelectorType defines the object type on which the Selector applies:
+ // - For "PodCIDR" the Selector matches k8s CiliumNode resources
+ // (path attributes apply to routes announced for PodCIDRs of selected CiliumNodes.
+ // Only affects routes of cluster scope / Kubernetes IPAM CIDRs, not Multi-Pool IPAM CIDRs.
+ // - For "CiliumLoadBalancerIPPool" the Selector matches CiliumLoadBalancerIPPool custom resources
+ // (path attributes apply to routes announced for selected CiliumLoadBalancerIPPools).
+ //
+ // +kubebuilder:validation:Enum=PodCIDR;CiliumLoadBalancerIPPool
+ // +kubebuilder:validation:Required
+ SelectorType string `json:"selectorType"`
+
+ // Selector selects a group of objects of the SelectorType
+ // resulting into routes that will be announced with the configured Attributes.
+ // If nil / not set, all objects of the SelectorType are selected.
+ //
+ // +kubebuilder:validation:Optional
+ Selector *slimv1.LabelSelector `json:"selector,omitempty"`
+
+ // Communities defines a set of community values advertised in the supported BGP Communities path attributes.
+ // If nil / not set, no BGP Communities path attribute will be advertised.
+ //
+ // +kubebuilder:validation:Optional
+ Communities *BGPCommunities `json:"communities,omitempty"`
+
+ // LocalPreference defines the preference value advertised in the BGP Local Preference path attribute.
+ // As Local Preference is only valid for iBGP peers, this value will be ignored for eBGP peers
+ // (no Local Preference path attribute will be advertised).
+ // If nil / not set, the default Local Preference of 100 will be advertised in
+ // the Local Preference path attribute for iBGP peers.
+ //
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:Minimum=0
+ // +kubebuilder:validation:Maximum=4294967295
+ LocalPreference *int64 `json:"localPreference,omitempty"`
+}
+
+// CiliumBGPFamily represents a AFI/SAFI address family pair.
+type CiliumBGPFamily struct {
+ // +kubebuilder:validation:Enum=ipv4;ipv6;l2vpn;ls;opaque
+ // +kubebuilder:validation:Required
+ Afi string `json:"afi"`
+ // +kubebuilder:validation:Enum=unicast;multicast;mpls_label;encapsulation;vpls;evpn;ls;sr_policy;mup;mpls_vpn;mpls_vpn_multicast;route_target_constraints;flowspec_unicast;flowspec_vpn;key_value
+ // +kubebuilder:validation:Required
+ Safi string `json:"safi"`
+}
+
+// CiliumBGPNeighbor is a neighboring peer for use in a
+// CiliumBGPVirtualRouter configuration.
+type CiliumBGPNeighbor struct {
+ // PeerAddress is the IP address of the peer.
+ // This must be in CIDR notation and use a /32 to express
+ // a single host.
+ //
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Format=cidr
+ PeerAddress string `json:"peerAddress"`
+ // PeerPort is the TCP port of the peer. 1-65535 is the range of
+ // valid port numbers that can be specified. If unset, defaults to 179.
+ //
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Maximum=65535
+ // +kubebuilder:default=179
+ PeerPort *int32 `json:"peerPort,omitempty"`
+ // PeerASN is the ASN of the peer BGP router.
+ // Supports extended 32bit ASNs
+ //
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Minimum=0
+ // +kubebuilder:validation:Maximum=4294967295
+ PeerASN int64 `json:"peerASN"`
+ // AuthSecretRef is the name of the secret to use to fetch a TCP
+ // authentication password for this peer.
+ // +kubebuilder:validation:Optional
+ AuthSecretRef *string `json:"authSecretRef,omitempty"`
+ // EBGPMultihopTTL controls the multi-hop feature for eBGP peers.
+ // Its value defines the Time To Live (TTL) value used in BGP packets sent to the neighbor.
+ // The value 1 implies that eBGP multi-hop feature is disabled (only a single hop is allowed).
+ // This field is ignored for iBGP peers.
+ //
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Maximum=255
+ // +kubebuilder:default=1
+ EBGPMultihopTTL *int32 `json:"eBGPMultihopTTL,omitempty"`
+ // ConnectRetryTimeSeconds defines the initial value for the BGP ConnectRetryTimer (RFC 4271, Section 8).
+ //
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Maximum=2147483647
+ // +kubebuilder:default=120
+ ConnectRetryTimeSeconds *int32 `json:"connectRetryTimeSeconds,omitempty"`
+ // HoldTimeSeconds defines the initial value for the BGP HoldTimer (RFC 4271, Section 4.2).
+ // Updating this value will cause a session reset.
+ //
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:Minimum=3
+ // +kubebuilder:validation:Maximum=65535
+ // +kubebuilder:default=90
+ HoldTimeSeconds *int32 `json:"holdTimeSeconds,omitempty"`
+ // KeepaliveTimeSeconds defines the initial value for the BGP KeepaliveTimer (RFC 4271, Section 8).
+ // It can not be larger than HoldTimeSeconds. Updating this value will cause a session reset.
+ //
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Maximum=65535
+ // +kubebuilder:default=30
+ KeepAliveTimeSeconds *int32 `json:"keepAliveTimeSeconds,omitempty"`
+ // GracefulRestart defines graceful restart parameters which are negotiated
+ // with this neighbor. If empty / nil, the graceful restart capability is disabled.
+ //
+ // +kubebuilder:validation:Optional
+ GracefulRestart *CiliumBGPNeighborGracefulRestart `json:"gracefulRestart,omitempty"`
+ // Families, if provided, defines a set of AFI/SAFIs the speaker will
+ // negotiate with it's peer.
+ //
+ // If this slice is not provided the default families of IPv6 and IPv4 will
+ // be provided.
+ //
+ // +kubebuilder:validation:Optional
+ Families []CiliumBGPFamily `json:"families"`
+ // AdvertisedPathAttributes can be used to apply additional path attributes
+ // to selected routes when advertising them to the peer.
+ // If empty / nil, no additional path attributes are advertised.
+ //
+ // +kubebuilder:validation:Optional
+ AdvertisedPathAttributes []CiliumBGPPathAttributes `json:"advertisedPathAttributes,omitempty"`
+}
+
+// CiliumBGPVirtualRouter defines a discrete BGP virtual router configuration.
+type CiliumBGPVirtualRouter struct {
+ // LocalASN is the ASN of this virtual router.
+ // Supports extended 32bit ASNs
+ //
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Minimum=0
+ // +kubebuilder:validation:Maximum=4294967295
+ LocalASN int64 `json:"localASN"`
+ // ExportPodCIDR determines whether to export the Node's private CIDR block
+ // to the configured neighbors.
+ //
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:default=false
+ ExportPodCIDR *bool `json:"exportPodCIDR,omitempty"`
+ // PodIPPoolSelector selects CiliumPodIPPools based on labels. The virtual
+ // router will announce allocated CIDRs of matching CiliumPodIPPools.
+ //
+ // If empty / nil no CiliumPodIPPools will be announced.
+ //
+ // +kubebuilder:validation:Optional
+ PodIPPoolSelector *slimv1.LabelSelector `json:"podIPPoolSelector,omitempty"`
+ // ServiceSelector selects a group of load balancer services which this
+ // virtual router will announce. The loadBalancerClass for a service must
+ // be nil or specify a class supported by Cilium, e.g. "io.cilium/bgp-control-plane".
+ // Refer to the following document for additional details regarding load balancer
+ // classes:
+ //
+ // https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class
+ //
+ // If empty / nil no services will be announced.
+ //
+ // +kubebuilder:validation:Optional
+ ServiceSelector *slimv1.LabelSelector `json:"serviceSelector,omitempty"`
+ // Neighbors is a list of neighboring BGP peers for this virtual router
+ //
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinItems=1
+ Neighbors []CiliumBGPNeighbor `json:"neighbors"`
+}
+
+// SetDefaults applies default values on the CiliumBGPPeeringPolicy.
+// This is normally done by kube-apiserver for fields with explicit static defaults,
+// the main use of this method is to avoid the need for nil-checks in the controller code.
+func (p *CiliumBGPPeeringPolicy) SetDefaults() {
+ for i := range p.Spec.VirtualRouters {
+ p.Spec.VirtualRouters[i].SetDefaults()
+ }
+}
+
+// SetDefaults applies default values on the CiliumBGPVirtualRouter.
+// This is normally done by kube-apiserver for fields with explicit static defaults,
+// the main use of this method is to avoid the need for nil-checks in the controller code.
+func (r *CiliumBGPVirtualRouter) SetDefaults() {
+ if r.ExportPodCIDR == nil {
+ r.ExportPodCIDR = pointer.Bool(DefaultBGPExportPodCIDR)
+ }
+ for i := range r.Neighbors {
+ r.Neighbors[i].SetDefaults()
+ }
+}
+
+// SetDefaults applies default values on the CiliumBGPNeighbor.
+// This is normally done by kube-apiserver for fields with explicit static defaults,
+// the main use of this method is to avoid the need for nil-checks in the controller code.
+func (n *CiliumBGPNeighbor) SetDefaults() {
+ if n.PeerPort == nil || *n.PeerPort == 0 {
+ n.PeerPort = pointer.Int32(DefaultBGPPeerPort)
+ }
+ if n.EBGPMultihopTTL == nil {
+ n.EBGPMultihopTTL = pointer.Int32(DefaultBGPEBGPMultihopTTL)
+ }
+ if n.ConnectRetryTimeSeconds == nil || *n.ConnectRetryTimeSeconds == 0 {
+ n.ConnectRetryTimeSeconds = pointer.Int32(DefaultBGPConnectRetryTimeSeconds)
+ }
+ if n.HoldTimeSeconds == nil || *n.HoldTimeSeconds == 0 {
+ n.HoldTimeSeconds = pointer.Int32(DefaultBGPHoldTimeSeconds)
+ }
+ if n.KeepAliveTimeSeconds == nil || *n.KeepAliveTimeSeconds == 0 {
+ n.KeepAliveTimeSeconds = pointer.Int32(DefaultBGPKeepAliveTimeSeconds)
+ }
+ if n.GracefulRestart != nil && n.GracefulRestart.Enabled &&
+ (n.GracefulRestart.RestartTimeSeconds == nil || *n.GracefulRestart.RestartTimeSeconds == 0) {
+ n.GracefulRestart.RestartTimeSeconds = pointer.Int32(DefaultBGPGRRestartTimeSeconds)
+ }
+ if len(n.Families) == 0 {
+ n.Families = []CiliumBGPFamily{
+ {
+ Afi: "ipv4",
+ Safi: "unicast",
+ },
+ {
+ Afi: "ipv6",
+ Safi: "unicast",
+ },
+ }
+ }
+}
+
+// Validate validates CiliumBGPNeighbor's configuration constraints
+// that can not be expressed using the kubebuilder validation markers.
+func (n *CiliumBGPNeighbor) Validate() error {
+ keepAliveTime := pointer.Int32Deref(n.KeepAliveTimeSeconds, DefaultBGPKeepAliveTimeSeconds)
+ holdTime := pointer.Int32Deref(n.HoldTimeSeconds, DefaultBGPHoldTimeSeconds)
+ if keepAliveTime > holdTime {
+ return fmt.Errorf("KeepAliveTimeSeconds larger than HoldTimeSeconds for peer ASN:%d IP:%s", n.PeerASN, n.PeerAddress)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/cidrgroups_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/cidrgroups_types.go
new file mode 100644
index 000000000..28bb78003
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/cidrgroups_types.go
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package v2alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "github.com/cilium/cilium/pkg/policy/api"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:resource:categories={cilium},singular="ciliumcidrgroup",path="ciliumcidrgroups",scope="Cluster",shortName={ccg}
+// +kubebuilder:object:root=true
+// +kubebuilder:storageversion
+// +deepequal-gen=false
+
+// CiliumCIDRGroup is a list of external CIDRs (i.e: CIDRs selecting peers
+// outside the clusters) that can be referenced as a single entity from
+// CiliumNetworkPolicies.
+type CiliumCIDRGroup struct {
+ // +deepequal-gen=false
+ metav1.TypeMeta `json:",inline"`
+ // +deepequal-gen=false
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // +kubebuilder:validation:Required
+ Spec CiliumCIDRGroupSpec `json:"spec"`
+}
+
+type CiliumCIDRGroupSpec struct {
+ // ExternalCIDRs is a list of CIDRs selecting peers outside the clusters.
+ //
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinItems=0
+ ExternalCIDRs []api.CIDR `json:"externalCIDRs"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +deepequal-gen=false
+
+type CiliumCIDRGroupList struct {
+ // +deepequal-gen=false
+ metav1.TypeMeta `json:",inline"`
+ // +deepequal-gen=false
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []CiliumCIDRGroup `json:"items"`
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/cnc_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/cnc_types.go
new file mode 100644
index 000000000..81a7e135e
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/cnc_types.go
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package v2alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+//+genclient
+//+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+//+kubebuilder:resource:categories={cilium}
+//+kubebuilder:object:root=true
+//+deepequal-gen=false
+//+kubebuilder:storageversion
+
+// CiliumNodeConfig is a list of configuration key-value pairs. It is applied to
+// nodes indicated by a label selector.
+//
+// If multiple overrides apply to the same node, they will be ordered by name
+// with later Overrides overwriting any conflicting keys.
+type CiliumNodeConfig struct {
+ // +deepequal-gen=false
+ metav1.TypeMeta `json:",inline"`
+ // +deepequal-gen=false
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec is the desired Cilium configuration overrides for a given node
+ Spec CiliumNodeConfigSpec `json:"spec"`
+}
+
+// +deepequal-gen=false
+type CiliumNodeConfigSpec struct {
+ // Defaults is treated the same as the cilium-config ConfigMap - a set
+ // of key-value pairs parsed by the agent and operator processes.
+ // Each key must be a valid config-map data field (i.e. a-z, A-Z, -, _, and .)
+ Defaults map[string]string `json:"defaults"`
+
+ // NodeSelector is a label selector that determines to which nodes
+ // this configuration applies.
+ // If not supplied, then this config applies to no nodes. If
+ // empty, then it applies to all nodes.
+ NodeSelector *metav1.LabelSelector `json:"nodeSelector"`
+}
+
+//+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+//+deepequal-gen=false
+
+type CiliumNodeConfigList struct {
+ // +deepequal-gen=false
+ metav1.TypeMeta `json:",inline"`
+ // +deepequal-gen=false
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []CiliumNodeConfig `json:"items"`
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/doc.go
new file mode 100644
index 000000000..d90d9acc8
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/doc.go
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// +k8s:deepcopy-gen=package,register
+// +k8s:openapi-gen=true
+// +deepequal-gen=package
+
+// Package v2alpha1 is the v2alpha1 version of the API.
+// +groupName=cilium.io
+package v2alpha1
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/ippool_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/ippool_types.go
new file mode 100644
index 000000000..a0e8792f2
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/ippool_types.go
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package v2alpha1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:resource:categories={cilium},singular="ciliumpodippool",path="ciliumpodippools",scope="Cluster",shortName={cpip}
+// +kubebuilder:object:root=true
+// +kubebuilder:storageversion
+
+// CiliumPodIPPool defines an IP pool that can be used for pooled IPAM (i.e. the multi-pool IPAM
+// mode).
+type CiliumPodIPPool struct {
+ // +deepequal-gen=false
+ metav1.TypeMeta `json:",inline"`
+ // +deepequal-gen=false
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // +kubebuilder:validation:Required
+ Spec IPPoolSpec `json:"spec"`
+}
+
+type IPPoolSpec struct {
+ // IPv4 specifies the IPv4 CIDRs and mask sizes of the pool
+ //
+ // +kubebuilder:validation:Optional
+ IPv4 *IPv4PoolSpec `json:"ipv4"`
+
+ // IPv6 specifies the IPv6 CIDRs and mask sizes of the pool
+ //
+ // +kubebuilder:validation:Optional
+ IPv6 *IPv6PoolSpec `json:"ipv6"`
+}
+
+type IPv4PoolSpec struct {
+ // CIDRs is a list of IPv4 CIDRs that are part of the pool.
+ //
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinItems=1
+ CIDRs []PoolCIDR `json:"cidrs"`
+
+ // MaskSize is the mask size of the pool.
+ //
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Maximum=32
+ // +kubebuilder:validation:ExclusiveMaximum=false
+ MaskSize uint8 `json:"maskSize"`
+}
+
+type IPv6PoolSpec struct {
+ // CIDRs is a list of IPv6 CIDRs that are part of the pool.
+ //
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinItems=1
+ CIDRs []PoolCIDR `json:"cidrs"`
+
+ // MaskSize is the mask size of the pool.
+ //
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Minimum=1
+ // +kubebuilder:validation:Maximum=128
+ // +kubebuilder:validation:ExclusiveMaximum=false
+ MaskSize uint8 `json:"maskSize"`
+}
+
+// PoolCIDR is an IP pool CIDR.
+//
+// +kubebuilder:validation:Format=cidr
+type PoolCIDR string
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +deepequal-gen=false
+
+// CiliumPodIPPoolList is a list of CiliumPodIPPool objects.
+type CiliumPodIPPoolList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+
+ // Items is a list of CiliumPodIPPools.
+ Items []CiliumPodIPPool `json:"items"`
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/l2announcement_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/l2announcement_types.go
new file mode 100644
index 000000000..717026045
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/l2announcement_types.go
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package v2alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ slimv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
+)
+
+// L2AnnounceLoadBalancerClass defines the L2 Announcer load balancer class for Services.
+const L2AnnounceLoadBalancerClass = "io.cilium/l2-announcer"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:resource:categories={cilium},singular="ciliuml2announcementpolicy",path="ciliuml2announcementpolicies",scope="Cluster",shortName={l2announcement}
+// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name="Age",type=date
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+
+// CiliumL2AnnouncementPolicy is a Kubernetes third-party resource which
+// is used to define which nodes should announce what services on the
+// L2 network.
+type CiliumL2AnnouncementPolicy struct {
+ // +deepequal-gen=false
+ metav1.TypeMeta `json:",inline"`
+ // +deepequal-gen=false
+ metav1.ObjectMeta `json:"metadata"`
+
+ // Spec is a human readable description of a L2 announcement policy
+ //
+ // +kubebuilder:validation:Optional
+ Spec CiliumL2AnnouncementPolicySpec `json:"spec,omitempty"`
+
+ // Status is the status of the policy.
+ //
+ // +deepequal-gen=false
+ // +kubebuilder:validation:Optional
+ Status CiliumL2AnnouncementPolicyStatus `json:"status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:openapi-gen=false
+// +deepequal-gen=false
+
+// CiliumL2AnnouncementPolicyList is a list of
+// CiliumL2AnnouncementPolicy objects.
+type CiliumL2AnnouncementPolicyList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ // Items is a list of CiliumL2AnnouncementPolicies.
+ Items []CiliumL2AnnouncementPolicy `json:"items"`
+}
+
+// +deepequal-gen=true
+
+// CiliumL2AnnouncementPolicySpec specifies which nodes should announce what
+// services to the L2 networks attached to the given list of interfaces.
+type CiliumL2AnnouncementPolicySpec struct {
+ // NodeSelector selects a group of nodes which will announce the IPs for
+ // the services selected by the service selector.
+ //
+ // If nil this policy applies to all nodes.
+ //
+ // +kubebuilder:validation:Optional
+ NodeSelector *slimv1.LabelSelector `json:"nodeSelector"`
+ // ServiceSelector selects a set of services which will be announced over L2 networks.
+ // The loadBalancerClass for a service must be nil or specify a supported class, e.g.
+ // "io.cilium/l2-announcer". Refer to the following document for additional details
+ // regarding load balancer classes:
+ //
+ // https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class
+ //
+ // If nil this policy applies to all services.
+ //
+ // +kubebuilder:validation:Optional
+ ServiceSelector *slimv1.LabelSelector `json:"serviceSelector"`
+ // If true, the loadbalancer IPs of the services are announced
+ //
+ // If nil this policy applies to all services.
+ //
+ // +kubebuilder:validation:Optional
+ LoadBalancerIPs bool `json:"loadBalancerIPs"`
+ // If true, the external IPs of the services are announced
+ //
+ // +kubebuilder:validation:Optional
+ ExternalIPs bool `json:"externalIPs"`
+ // A list of regular expressions that express which network interface(s) should be used
+ // to announce the services over. If nil, all network interfaces are used.
+ //
+ // +kubebuilder:validation:Optional
+ Interfaces []string `json:"interfaces"`
+}
+
+// +deepequal-gen=false
+
+// CiliumL2AnnouncementPolicyStatus contains the status of a CiliumL2AnnouncementPolicy.
+type CiliumL2AnnouncementPolicyStatus struct {
+ // Current service state
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=type
+ Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/lbipam_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/lbipam_types.go
new file mode 100644
index 000000000..aa714dbbf
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/lbipam_types.go
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package v2alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ slimv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:resource:categories={cilium},singular="ciliumloadbalancerippool",path="ciliumloadbalancerippools",scope="Cluster",shortName={ippools,ippool,lbippool,lbippools}
+// +kubebuilder:printcolumn:JSONPath=".spec.disabled",name="Disabled",type=boolean
+// +kubebuilder:printcolumn:name="Conflicting",type=string,JSONPath=`.status.conditions[?(@.type=="cilium.io/PoolConflict")].status`
+// +kubebuilder:printcolumn:name="IPs Available",type=string,JSONPath=`.status.conditions[?(@.type=="cilium.io/IPsAvailable")].message`
+// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name="Age",type=date
+// +kubebuilder:subresource:status
+// +kubebuilder:storageversion
+
+// CiliumLoadBalancerIPPool is a Kubernetes third-party resource which
+// is used to define pools of IPs which the operator can use to allocate
+// and advertise IPs for Services of type LoadBalancer.
+type CiliumLoadBalancerIPPool struct {
+ // +deepequal-gen=false
+ metav1.TypeMeta `json:",inline"`
+ // +deepequal-gen=false
+ metav1.ObjectMeta `json:"metadata"`
+
+ // Spec is a human readable description for a BGP load balancer
+ // ip pool.
+ //
+ // +kubebuilder:validation:Required
+ Spec CiliumLoadBalancerIPPoolSpec `json:"spec,omitempty"`
+
+ // Status is the status of the IP Pool.
+ //
+ // It might be possible for users to define overlapping IP Pools, we can't validate or enforce non-overlapping pools
+ // during object creation. The Cilium operator will do this validation and update the status to reflect the ability
+ // to allocate IPs from this pool.
+ //
+ // +deepequal-gen=false
+ // +kubebuilder:validation:Optional
+ Status CiliumLoadBalancerIPPoolStatus `json:"status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:openapi-gen=false
+// +deepequal-gen=false
+
+// CiliumLoadBalancerIPPoolList is a list of
+// CiliumLoadBalancerIPPool objects.
+type CiliumLoadBalancerIPPoolList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+	// Items is a list of CiliumLoadBalancerIPPools.
+ Items []CiliumLoadBalancerIPPool `json:"items"`
+}
+
+// +deepequal-gen=true
+
+// CiliumLoadBalancerIPPoolSpec is a human readable description for
+// a load balancer IP pool.
+type CiliumLoadBalancerIPPoolSpec struct {
+	// ServiceSelector selects a set of services which are eligible to receive IPs from this pool.
+ //
+ // +kubebuilder:validation:Optional
+ ServiceSelector *slimv1.LabelSelector `json:"serviceSelector"`
+ // CiliumLoadBalancerIPPoolCIDRBlock is a list of CIDRs comprising this IP Pool
+ //
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinItems=1
+ Cidrs []CiliumLoadBalancerIPPoolCIDRBlock `json:"cidrs"`
+ // Disabled, if set to true means that no new IPs will be allocated from this pool.
+ // Existing allocations will not be removed from services.
+ //
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:default=false
+ Disabled bool `json:"disabled"`
+}
+
+// CiliumLoadBalancerIPPoolCIDRBlock describes a single CIDR block.
+type CiliumLoadBalancerIPPoolCIDRBlock struct {
+ // +kubebuilder:validation:Format=cidr
+ // +kubebuilder:validation:Required
+ Cidr IPv4orIPv6CIDR `json:"cidr"`
+}
+
+// +deepequal-gen=false
+
+// CiliumLoadBalancerIPPoolStatus contains the status of a CiliumLoadBalancerIPPool.
+type CiliumLoadBalancerIPPoolStatus struct {
+ // Current service state
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=type
+ Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/register.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/register.go
new file mode 100644
index 000000000..32bb85957
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/register.go
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package v2alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ k8sconst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
+)
+
+const (
+ // CustomResourceDefinitionGroup is the name of the third party resource group
+ CustomResourceDefinitionGroup = k8sconst.CustomResourceDefinitionGroup
+
+ // CustomResourceDefinitionVersion is the current version of the resource
+ CustomResourceDefinitionVersion = "v2alpha1"
+
+ // Cilium Endpoint Slice (CES)
+
+ // CESPluralName is the plural name of Cilium Endpoint Slice
+ CESPluralName = "ciliumendpointslices"
+
+ // CESKindDefinition is the kind name of Cilium Endpoint Slice
+ CESKindDefinition = "CiliumEndpointSlice"
+
+ // CESName is the full name of Cilium Endpoint Slice
+ CESName = CESPluralName + "." + CustomResourceDefinitionGroup
+
+ // Cilium BGP Peering Policy (BGPP)
+
+ // BGPPPluralName is the plural name of Cilium BGP Peering Policy
+ BGPPPluralName = "ciliumbgppeeringpolicies"
+
+ // BGPPKindDefinition is the kind name of Cilium BGP Peering Policy
+ BGPPKindDefinition = "CiliumBGPPeeringPolicy"
+
+ // BGPPName is the full name of Cilium BGP Peering Policy
+ BGPPName = BGPPPluralName + "." + CustomResourceDefinitionGroup
+
+ // Cilium Load Balancer IP Pool (IPPool)
+
+ // PoolPluralName is the plural name of Cilium Load Balancer IP Pool
+ PoolPluralName = "ciliumloadbalancerippools"
+
+	// PoolKindDefinition is the kind name of Cilium Load Balancer IP Pool
+ PoolKindDefinition = "CiliumLoadBalancerIPPool"
+
+ // LBIPPoolName is the full name of Cilium Load Balancer IP Pool
+ LBIPPoolName = PoolPluralName + "." + CustomResourceDefinitionGroup
+
+ // CiliumNodeConfig (CNC)
+ CNCPluralName = "ciliumnodeconfigs"
+ CNCKindDefinition = "CiliumNodeConfig"
+ CNCName = CNCPluralName + "." + CustomResourceDefinitionGroup
+
+ // CiliumCIDRGroup (CCG)
+ CCGPluralName = "ciliumcidrgroups"
+ CCGKindDefinition = "CiliumCIDRGroup"
+ CCGName = CCGPluralName + "." + CustomResourceDefinitionGroup
+
+ // Cilium L2 Announcement policy
+
+	// L2AnnouncementSingularName is the singular name of Cilium L2 announcement policy
+ L2AnnouncementSingularName = "ciliuml2announcementpolicy"
+
+ // L2AnnouncementPluralName is the plural name of Cilium L2 announcement policy
+ L2AnnouncementPluralName = "ciliuml2announcementpolicies"
+
+ // L2AnnouncementKindDefinition is the kind name of Cilium L2 announcement policy
+ L2AnnouncementKindDefinition = "CiliumL2AnnouncementPolicy"
+
+ // L2AnnouncementName is the full name of Cilium L2 announcement policy
+ L2AnnouncementName = L2AnnouncementPluralName + "." + CustomResourceDefinitionGroup
+
+ // CiliumPodIPPool (CPIP)
+ CPIPPluralName = "ciliumpodippools"
+ CPIPKindDefinition = "CiliumPodIPPool"
+ CPIPName = CPIPPluralName + "." + CustomResourceDefinitionGroup
+)
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{
+ Group: CustomResourceDefinitionGroup,
+ Version: CustomResourceDefinitionVersion,
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ // SchemeBuilder is needed by DeepCopy generator.
+ SchemeBuilder runtime.SchemeBuilder
+ // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+ localSchemeBuilder = &SchemeBuilder
+
+ // AddToScheme adds all types of this clientset into the given scheme.
+ // This allows composition of clientsets, like in:
+ //
+ // import (
+ // "k8s.io/client-go/kubernetes"
+ // clientsetscheme "k8s.io/client-go/kubernetes/scheme"
+ // aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+ // )
+ //
+ // kclientset, _ := kubernetes.NewForConfig(c)
+ // aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+ AddToScheme = localSchemeBuilder.AddToScheme
+)
+
+func init() {
+ // We only register manually written functions here. The registration of the
+ // generated functions takes place in the generated files. The separation
+ // makes the code compile even when the generated files are missing.
+ localSchemeBuilder.Register(addKnownTypes)
+}
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &CiliumEndpointSlice{},
+ &CiliumEndpointSliceList{},
+ &CiliumBGPPeeringPolicy{},
+ &CiliumBGPPeeringPolicyList{},
+ &CiliumLoadBalancerIPPool{},
+ &CiliumLoadBalancerIPPoolList{},
+ &CiliumNodeConfig{},
+ &CiliumNodeConfigList{},
+ &CiliumCIDRGroup{},
+ &CiliumCIDRGroupList{},
+ &CiliumL2AnnouncementPolicy{},
+ &CiliumL2AnnouncementPolicyList{},
+ &CiliumPodIPPool{},
+ &CiliumPodIPPoolList{},
+ )
+
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/types.go
new file mode 100644
index 000000000..df388cc5c
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/types.go
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package v2alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "github.com/cilium/cilium/api/v1/models"
+ cilium_v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
+ slimv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
+)
+
+// +kubebuilder:validation:Format=cidr
+type IPv4orIPv6CIDR string
+
+type EgressRule struct {
+ // Selects Namespaces using cluster-scoped labels. This field follows standard label
+ // selector semantics; if present but empty, it selects all namespaces.
+ NamespaceSelector *slimv1.LabelSelector `json:"namespaceSelector,omitempty"`
+
+ // This is a label selector which selects Pods. This field follows standard label
+ // selector semantics; if present but empty, it selects all pods.
+ PodSelector *slimv1.LabelSelector `json:"podSelector,omitempty"`
+}
+
+// CoreCiliumEndpoint is slim version of status of CiliumEndpoint.
+type CoreCiliumEndpoint struct {
+ // Name indicate as CiliumEndpoint name.
+ Name string `json:"name,omitempty"`
+ // IdentityID is the numeric identity of the endpoint
+ IdentityID int64 `json:"id,omitempty"`
+ // Networking is the networking properties of the endpoint.
+
+ // +kubebuilder:validation:Optional
+ Networking *cilium_v2.EndpointNetworking `json:"networking,omitempty"`
+ // Encryption is the encryption configuration of the node
+
+ // +kubebuilder:validation:Optional
+ Encryption cilium_v2.EncryptionSpec `json:"encryption,omitempty"`
+ NamedPorts models.NamedPorts `json:"named-ports,omitempty"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:resource:categories={cilium},singular="ciliumendpointslice",path="ciliumendpointslices",scope="Cluster",shortName={ces}
+// +kubebuilder:storageversion
+
+// CiliumEndpointSlice contains a group of CoreCiliumendpoints.
+type CiliumEndpointSlice struct {
+ // +deepequal-gen=false
+ metav1.TypeMeta `json:",inline"`
+ // +deepequal-gen=false
+ metav1.ObjectMeta `json:"metadata"`
+
+ // Namespace indicate as CiliumEndpointSlice namespace.
+ // All the CiliumEndpoints within the same namespace are put together
+ // in CiliumEndpointSlice.
+ Namespace string `json:"namespace,omitempty"`
+
+ // Endpoints is a list of coreCEPs packed in a CiliumEndpointSlice
+ Endpoints []CoreCiliumEndpoint `json:"endpoints"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:openapi-gen=false
+// +deepequal-gen=false
+
+// CiliumEndpointSliceList is a list of CiliumEndpointSlice objects.
+type CiliumEndpointSliceList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ // Items is a list of CiliumEndpointSlice.
+ Items []CiliumEndpointSlice `json:"items"`
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..4e3662ace
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,964 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v2alpha1
+
+import (
+ models "github.com/cilium/cilium/api/v1/models"
+ v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
+ api "github.com/cilium/cilium/pkg/policy/api"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BGPCommunities) DeepCopyInto(out *BGPCommunities) {
+ *out = *in
+ if in.Standard != nil {
+ in, out := &in.Standard, &out.Standard
+ *out = make([]BGPStandardCommunity, len(*in))
+ copy(*out, *in)
+ }
+ if in.Large != nil {
+ in, out := &in.Large, &out.Large
+ *out = make([]BGPLargeCommunity, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPCommunities.
+func (in *BGPCommunities) DeepCopy() *BGPCommunities {
+ if in == nil {
+ return nil
+ }
+ out := new(BGPCommunities)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumBGPFamily) DeepCopyInto(out *CiliumBGPFamily) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPFamily.
+func (in *CiliumBGPFamily) DeepCopy() *CiliumBGPFamily {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumBGPFamily)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumBGPNeighbor) DeepCopyInto(out *CiliumBGPNeighbor) {
+ *out = *in
+ if in.PeerPort != nil {
+ in, out := &in.PeerPort, &out.PeerPort
+ *out = new(int32)
+ **out = **in
+ }
+ if in.AuthSecretRef != nil {
+ in, out := &in.AuthSecretRef, &out.AuthSecretRef
+ *out = new(string)
+ **out = **in
+ }
+ if in.EBGPMultihopTTL != nil {
+ in, out := &in.EBGPMultihopTTL, &out.EBGPMultihopTTL
+ *out = new(int32)
+ **out = **in
+ }
+ if in.ConnectRetryTimeSeconds != nil {
+ in, out := &in.ConnectRetryTimeSeconds, &out.ConnectRetryTimeSeconds
+ *out = new(int32)
+ **out = **in
+ }
+ if in.HoldTimeSeconds != nil {
+ in, out := &in.HoldTimeSeconds, &out.HoldTimeSeconds
+ *out = new(int32)
+ **out = **in
+ }
+ if in.KeepAliveTimeSeconds != nil {
+ in, out := &in.KeepAliveTimeSeconds, &out.KeepAliveTimeSeconds
+ *out = new(int32)
+ **out = **in
+ }
+ if in.GracefulRestart != nil {
+ in, out := &in.GracefulRestart, &out.GracefulRestart
+ *out = new(CiliumBGPNeighborGracefulRestart)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Families != nil {
+ in, out := &in.Families, &out.Families
+ *out = make([]CiliumBGPFamily, len(*in))
+ copy(*out, *in)
+ }
+ if in.AdvertisedPathAttributes != nil {
+ in, out := &in.AdvertisedPathAttributes, &out.AdvertisedPathAttributes
+ *out = make([]CiliumBGPPathAttributes, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNeighbor.
+func (in *CiliumBGPNeighbor) DeepCopy() *CiliumBGPNeighbor {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumBGPNeighbor)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumBGPNeighborGracefulRestart) DeepCopyInto(out *CiliumBGPNeighborGracefulRestart) {
+ *out = *in
+ if in.RestartTimeSeconds != nil {
+ in, out := &in.RestartTimeSeconds, &out.RestartTimeSeconds
+ *out = new(int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNeighborGracefulRestart.
+func (in *CiliumBGPNeighborGracefulRestart) DeepCopy() *CiliumBGPNeighborGracefulRestart {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumBGPNeighborGracefulRestart)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumBGPPathAttributes) DeepCopyInto(out *CiliumBGPPathAttributes) {
+ *out = *in
+ if in.Selector != nil {
+ in, out := &in.Selector, &out.Selector
+ *out = new(v1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Communities != nil {
+ in, out := &in.Communities, &out.Communities
+ *out = new(BGPCommunities)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.LocalPreference != nil {
+ in, out := &in.LocalPreference, &out.LocalPreference
+ *out = new(int64)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPPathAttributes.
+func (in *CiliumBGPPathAttributes) DeepCopy() *CiliumBGPPathAttributes {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumBGPPathAttributes)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumBGPPeeringPolicy) DeepCopyInto(out *CiliumBGPPeeringPolicy) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPPeeringPolicy.
+func (in *CiliumBGPPeeringPolicy) DeepCopy() *CiliumBGPPeeringPolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumBGPPeeringPolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CiliumBGPPeeringPolicy) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumBGPPeeringPolicyList) DeepCopyInto(out *CiliumBGPPeeringPolicyList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]CiliumBGPPeeringPolicy, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPPeeringPolicyList.
+func (in *CiliumBGPPeeringPolicyList) DeepCopy() *CiliumBGPPeeringPolicyList {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumBGPPeeringPolicyList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CiliumBGPPeeringPolicyList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumBGPPeeringPolicySpec) DeepCopyInto(out *CiliumBGPPeeringPolicySpec) {
+ *out = *in
+ if in.NodeSelector != nil {
+ in, out := &in.NodeSelector, &out.NodeSelector
+ *out = new(v1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.VirtualRouters != nil {
+ in, out := &in.VirtualRouters, &out.VirtualRouters
+ *out = make([]CiliumBGPVirtualRouter, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPPeeringPolicySpec.
+func (in *CiliumBGPPeeringPolicySpec) DeepCopy() *CiliumBGPPeeringPolicySpec {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumBGPPeeringPolicySpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumBGPVirtualRouter) DeepCopyInto(out *CiliumBGPVirtualRouter) {
+ *out = *in
+ if in.ExportPodCIDR != nil {
+ in, out := &in.ExportPodCIDR, &out.ExportPodCIDR
+ *out = new(bool)
+ **out = **in
+ }
+ if in.PodIPPoolSelector != nil {
+ in, out := &in.PodIPPoolSelector, &out.PodIPPoolSelector
+ *out = new(v1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ServiceSelector != nil {
+ in, out := &in.ServiceSelector, &out.ServiceSelector
+ *out = new(v1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Neighbors != nil {
+ in, out := &in.Neighbors, &out.Neighbors
+ *out = make([]CiliumBGPNeighbor, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPVirtualRouter.
+func (in *CiliumBGPVirtualRouter) DeepCopy() *CiliumBGPVirtualRouter {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumBGPVirtualRouter)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumCIDRGroup) DeepCopyInto(out *CiliumCIDRGroup) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumCIDRGroup.
+func (in *CiliumCIDRGroup) DeepCopy() *CiliumCIDRGroup {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumCIDRGroup)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CiliumCIDRGroup) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumCIDRGroupList) DeepCopyInto(out *CiliumCIDRGroupList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]CiliumCIDRGroup, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumCIDRGroupList.
+func (in *CiliumCIDRGroupList) DeepCopy() *CiliumCIDRGroupList {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumCIDRGroupList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CiliumCIDRGroupList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumCIDRGroupSpec) DeepCopyInto(out *CiliumCIDRGroupSpec) {
+ *out = *in
+ if in.ExternalCIDRs != nil {
+ in, out := &in.ExternalCIDRs, &out.ExternalCIDRs
+ *out = make([]api.CIDR, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumCIDRGroupSpec.
+func (in *CiliumCIDRGroupSpec) DeepCopy() *CiliumCIDRGroupSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumCIDRGroupSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumEndpointSlice) DeepCopyInto(out *CiliumEndpointSlice) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.Endpoints != nil {
+ in, out := &in.Endpoints, &out.Endpoints
+ *out = make([]CoreCiliumEndpoint, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEndpointSlice.
+func (in *CiliumEndpointSlice) DeepCopy() *CiliumEndpointSlice {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumEndpointSlice)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CiliumEndpointSlice) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumEndpointSliceList) DeepCopyInto(out *CiliumEndpointSliceList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]CiliumEndpointSlice, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumEndpointSliceList.
+func (in *CiliumEndpointSliceList) DeepCopy() *CiliumEndpointSliceList {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumEndpointSliceList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CiliumEndpointSliceList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumL2AnnouncementPolicy) DeepCopyInto(out *CiliumL2AnnouncementPolicy) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumL2AnnouncementPolicy.
+func (in *CiliumL2AnnouncementPolicy) DeepCopy() *CiliumL2AnnouncementPolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumL2AnnouncementPolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CiliumL2AnnouncementPolicy) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumL2AnnouncementPolicyList) DeepCopyInto(out *CiliumL2AnnouncementPolicyList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]CiliumL2AnnouncementPolicy, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumL2AnnouncementPolicyList.
+func (in *CiliumL2AnnouncementPolicyList) DeepCopy() *CiliumL2AnnouncementPolicyList {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumL2AnnouncementPolicyList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CiliumL2AnnouncementPolicyList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumL2AnnouncementPolicySpec) DeepCopyInto(out *CiliumL2AnnouncementPolicySpec) {
+ *out = *in
+ if in.NodeSelector != nil {
+ in, out := &in.NodeSelector, &out.NodeSelector
+ *out = new(v1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ServiceSelector != nil {
+ in, out := &in.ServiceSelector, &out.ServiceSelector
+ *out = new(v1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Interfaces != nil {
+ in, out := &in.Interfaces, &out.Interfaces
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumL2AnnouncementPolicySpec.
+func (in *CiliumL2AnnouncementPolicySpec) DeepCopy() *CiliumL2AnnouncementPolicySpec {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumL2AnnouncementPolicySpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumL2AnnouncementPolicyStatus) DeepCopyInto(out *CiliumL2AnnouncementPolicyStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumL2AnnouncementPolicyStatus.
+func (in *CiliumL2AnnouncementPolicyStatus) DeepCopy() *CiliumL2AnnouncementPolicyStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumL2AnnouncementPolicyStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumLoadBalancerIPPool) DeepCopyInto(out *CiliumLoadBalancerIPPool) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumLoadBalancerIPPool.
+func (in *CiliumLoadBalancerIPPool) DeepCopy() *CiliumLoadBalancerIPPool {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumLoadBalancerIPPool)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CiliumLoadBalancerIPPool) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumLoadBalancerIPPoolCIDRBlock) DeepCopyInto(out *CiliumLoadBalancerIPPoolCIDRBlock) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumLoadBalancerIPPoolCIDRBlock.
+func (in *CiliumLoadBalancerIPPoolCIDRBlock) DeepCopy() *CiliumLoadBalancerIPPoolCIDRBlock {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumLoadBalancerIPPoolCIDRBlock)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumLoadBalancerIPPoolList) DeepCopyInto(out *CiliumLoadBalancerIPPoolList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]CiliumLoadBalancerIPPool, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumLoadBalancerIPPoolList.
+func (in *CiliumLoadBalancerIPPoolList) DeepCopy() *CiliumLoadBalancerIPPoolList {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumLoadBalancerIPPoolList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CiliumLoadBalancerIPPoolList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumLoadBalancerIPPoolSpec) DeepCopyInto(out *CiliumLoadBalancerIPPoolSpec) {
+ *out = *in
+ if in.ServiceSelector != nil {
+ in, out := &in.ServiceSelector, &out.ServiceSelector
+ *out = new(v1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Cidrs != nil {
+ in, out := &in.Cidrs, &out.Cidrs
+ *out = make([]CiliumLoadBalancerIPPoolCIDRBlock, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumLoadBalancerIPPoolSpec.
+func (in *CiliumLoadBalancerIPPoolSpec) DeepCopy() *CiliumLoadBalancerIPPoolSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumLoadBalancerIPPoolSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumLoadBalancerIPPoolStatus) DeepCopyInto(out *CiliumLoadBalancerIPPoolStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumLoadBalancerIPPoolStatus.
+func (in *CiliumLoadBalancerIPPoolStatus) DeepCopy() *CiliumLoadBalancerIPPoolStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumLoadBalancerIPPoolStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumNodeConfig) DeepCopyInto(out *CiliumNodeConfig) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNodeConfig.
+func (in *CiliumNodeConfig) DeepCopy() *CiliumNodeConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumNodeConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CiliumNodeConfig) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumNodeConfigList) DeepCopyInto(out *CiliumNodeConfigList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]CiliumNodeConfig, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNodeConfigList.
+func (in *CiliumNodeConfigList) DeepCopy() *CiliumNodeConfigList {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumNodeConfigList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CiliumNodeConfigList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumNodeConfigSpec) DeepCopyInto(out *CiliumNodeConfigSpec) {
+ *out = *in
+ if in.Defaults != nil {
+ in, out := &in.Defaults, &out.Defaults
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.NodeSelector != nil {
+ in, out := &in.NodeSelector, &out.NodeSelector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNodeConfigSpec.
+func (in *CiliumNodeConfigSpec) DeepCopy() *CiliumNodeConfigSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumNodeConfigSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumPodIPPool) DeepCopyInto(out *CiliumPodIPPool) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumPodIPPool.
+func (in *CiliumPodIPPool) DeepCopy() *CiliumPodIPPool {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumPodIPPool)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CiliumPodIPPool) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CiliumPodIPPoolList) DeepCopyInto(out *CiliumPodIPPoolList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]CiliumPodIPPool, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumPodIPPoolList.
+func (in *CiliumPodIPPoolList) DeepCopy() *CiliumPodIPPoolList {
+ if in == nil {
+ return nil
+ }
+ out := new(CiliumPodIPPoolList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CiliumPodIPPoolList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CoreCiliumEndpoint) DeepCopyInto(out *CoreCiliumEndpoint) {
+ *out = *in
+ if in.Networking != nil {
+ in, out := &in.Networking, &out.Networking
+ *out = new(v2.EndpointNetworking)
+ (*in).DeepCopyInto(*out)
+ }
+ out.Encryption = in.Encryption
+ if in.NamedPorts != nil {
+ in, out := &in.NamedPorts, &out.NamedPorts
+ *out = make(models.NamedPorts, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(models.Port)
+ **out = **in
+ }
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreCiliumEndpoint.
+func (in *CoreCiliumEndpoint) DeepCopy() *CoreCiliumEndpoint {
+ if in == nil {
+ return nil
+ }
+ out := new(CoreCiliumEndpoint)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EgressRule) DeepCopyInto(out *EgressRule) {
+ *out = *in
+ if in.NamespaceSelector != nil {
+ in, out := &in.NamespaceSelector, &out.NamespaceSelector
+ *out = new(v1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.PodSelector != nil {
+ in, out := &in.PodSelector, &out.PodSelector
+ *out = new(v1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressRule.
+func (in *EgressRule) DeepCopy() *EgressRule {
+ if in == nil {
+ return nil
+ }
+ out := new(EgressRule)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPPoolSpec) DeepCopyInto(out *IPPoolSpec) {
+ *out = *in
+ if in.IPv4 != nil {
+ in, out := &in.IPv4, &out.IPv4
+ *out = new(IPv4PoolSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.IPv6 != nil {
+ in, out := &in.IPv6, &out.IPv6
+ *out = new(IPv6PoolSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPPoolSpec.
+func (in *IPPoolSpec) DeepCopy() *IPPoolSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(IPPoolSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPv4PoolSpec) DeepCopyInto(out *IPv4PoolSpec) {
+ *out = *in
+ if in.CIDRs != nil {
+ in, out := &in.CIDRs, &out.CIDRs
+ *out = make([]PoolCIDR, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv4PoolSpec.
+func (in *IPv4PoolSpec) DeepCopy() *IPv4PoolSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(IPv4PoolSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPv6PoolSpec) DeepCopyInto(out *IPv6PoolSpec) {
+ *out = *in
+ if in.CIDRs != nil {
+ in, out := &in.CIDRs, &out.CIDRs
+ *out = make([]PoolCIDR, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPv6PoolSpec.
+func (in *IPv6PoolSpec) DeepCopy() *IPv6PoolSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(IPv6PoolSpec)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepequal.go
new file mode 100644
index 000000000..e3ccba0ef
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepequal.go
@@ -0,0 +1,687 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by deepequal-gen. DO NOT EDIT.
+
+package v2alpha1
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *BGPCommunities) DeepEqual(other *BGPCommunities) bool {
+ if other == nil {
+ return false
+ }
+
+ if ((in.Standard != nil) && (other.Standard != nil)) || ((in.Standard == nil) != (other.Standard == nil)) {
+ in, other := &in.Standard, &other.Standard
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if inElement != (*other)[i] {
+ return false
+ }
+ }
+ }
+ }
+
+ if ((in.Large != nil) && (other.Large != nil)) || ((in.Large == nil) != (other.Large == nil)) {
+ in, other := &in.Large, &other.Large
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if inElement != (*other)[i] {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CiliumBGPFamily) DeepEqual(other *CiliumBGPFamily) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Afi != other.Afi {
+ return false
+ }
+ if in.Safi != other.Safi {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CiliumBGPNeighbor) DeepEqual(other *CiliumBGPNeighbor) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.PeerAddress != other.PeerAddress {
+ return false
+ }
+ if (in.PeerPort == nil) != (other.PeerPort == nil) {
+ return false
+ } else if in.PeerPort != nil {
+ if *in.PeerPort != *other.PeerPort {
+ return false
+ }
+ }
+
+ if in.PeerASN != other.PeerASN {
+ return false
+ }
+ if (in.AuthSecretRef == nil) != (other.AuthSecretRef == nil) {
+ return false
+ } else if in.AuthSecretRef != nil {
+ if *in.AuthSecretRef != *other.AuthSecretRef {
+ return false
+ }
+ }
+
+ if (in.EBGPMultihopTTL == nil) != (other.EBGPMultihopTTL == nil) {
+ return false
+ } else if in.EBGPMultihopTTL != nil {
+ if *in.EBGPMultihopTTL != *other.EBGPMultihopTTL {
+ return false
+ }
+ }
+
+ if (in.ConnectRetryTimeSeconds == nil) != (other.ConnectRetryTimeSeconds == nil) {
+ return false
+ } else if in.ConnectRetryTimeSeconds != nil {
+ if *in.ConnectRetryTimeSeconds != *other.ConnectRetryTimeSeconds {
+ return false
+ }
+ }
+
+ if (in.HoldTimeSeconds == nil) != (other.HoldTimeSeconds == nil) {
+ return false
+ } else if in.HoldTimeSeconds != nil {
+ if *in.HoldTimeSeconds != *other.HoldTimeSeconds {
+ return false
+ }
+ }
+
+ if (in.KeepAliveTimeSeconds == nil) != (other.KeepAliveTimeSeconds == nil) {
+ return false
+ } else if in.KeepAliveTimeSeconds != nil {
+ if *in.KeepAliveTimeSeconds != *other.KeepAliveTimeSeconds {
+ return false
+ }
+ }
+
+ if (in.GracefulRestart == nil) != (other.GracefulRestart == nil) {
+ return false
+ } else if in.GracefulRestart != nil {
+ if !in.GracefulRestart.DeepEqual(other.GracefulRestart) {
+ return false
+ }
+ }
+
+ if ((in.Families != nil) && (other.Families != nil)) || ((in.Families == nil) != (other.Families == nil)) {
+ in, other := &in.Families, &other.Families
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ if ((in.AdvertisedPathAttributes != nil) && (other.AdvertisedPathAttributes != nil)) || ((in.AdvertisedPathAttributes == nil) != (other.AdvertisedPathAttributes == nil)) {
+ in, other := &in.AdvertisedPathAttributes, &other.AdvertisedPathAttributes
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CiliumBGPNeighborGracefulRestart) DeepEqual(other *CiliumBGPNeighborGracefulRestart) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Enabled != other.Enabled {
+ return false
+ }
+ if (in.RestartTimeSeconds == nil) != (other.RestartTimeSeconds == nil) {
+ return false
+ } else if in.RestartTimeSeconds != nil {
+ if *in.RestartTimeSeconds != *other.RestartTimeSeconds {
+ return false
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CiliumBGPPathAttributes) DeepEqual(other *CiliumBGPPathAttributes) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.SelectorType != other.SelectorType {
+ return false
+ }
+ if (in.Selector == nil) != (other.Selector == nil) {
+ return false
+ } else if in.Selector != nil {
+ if !in.Selector.DeepEqual(other.Selector) {
+ return false
+ }
+ }
+
+ if (in.Communities == nil) != (other.Communities == nil) {
+ return false
+ } else if in.Communities != nil {
+ if !in.Communities.DeepEqual(other.Communities) {
+ return false
+ }
+ }
+
+ if (in.LocalPreference == nil) != (other.LocalPreference == nil) {
+ return false
+ } else if in.LocalPreference != nil {
+ if *in.LocalPreference != *other.LocalPreference {
+ return false
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CiliumBGPPeeringPolicy) DeepEqual(other *CiliumBGPPeeringPolicy) bool {
+ if other == nil {
+ return false
+ }
+
+ if !in.Spec.DeepEqual(&other.Spec) {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CiliumBGPPeeringPolicySpec) DeepEqual(other *CiliumBGPPeeringPolicySpec) bool {
+ if other == nil {
+ return false
+ }
+
+ if (in.NodeSelector == nil) != (other.NodeSelector == nil) {
+ return false
+ } else if in.NodeSelector != nil {
+ if !in.NodeSelector.DeepEqual(other.NodeSelector) {
+ return false
+ }
+ }
+
+ if ((in.VirtualRouters != nil) && (other.VirtualRouters != nil)) || ((in.VirtualRouters == nil) != (other.VirtualRouters == nil)) {
+ in, other := &in.VirtualRouters, &other.VirtualRouters
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CiliumBGPVirtualRouter) DeepEqual(other *CiliumBGPVirtualRouter) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.LocalASN != other.LocalASN {
+ return false
+ }
+ if (in.ExportPodCIDR == nil) != (other.ExportPodCIDR == nil) {
+ return false
+ } else if in.ExportPodCIDR != nil {
+ if *in.ExportPodCIDR != *other.ExportPodCIDR {
+ return false
+ }
+ }
+
+ if (in.PodIPPoolSelector == nil) != (other.PodIPPoolSelector == nil) {
+ return false
+ } else if in.PodIPPoolSelector != nil {
+ if !in.PodIPPoolSelector.DeepEqual(other.PodIPPoolSelector) {
+ return false
+ }
+ }
+
+ if (in.ServiceSelector == nil) != (other.ServiceSelector == nil) {
+ return false
+ } else if in.ServiceSelector != nil {
+ if !in.ServiceSelector.DeepEqual(other.ServiceSelector) {
+ return false
+ }
+ }
+
+ if ((in.Neighbors != nil) && (other.Neighbors != nil)) || ((in.Neighbors == nil) != (other.Neighbors == nil)) {
+ in, other := &in.Neighbors, &other.Neighbors
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CiliumCIDRGroupSpec) DeepEqual(other *CiliumCIDRGroupSpec) bool {
+ if other == nil {
+ return false
+ }
+
+ if ((in.ExternalCIDRs != nil) && (other.ExternalCIDRs != nil)) || ((in.ExternalCIDRs == nil) != (other.ExternalCIDRs == nil)) {
+ in, other := &in.ExternalCIDRs, &other.ExternalCIDRs
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if inElement != (*other)[i] {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CiliumEndpointSlice) DeepEqual(other *CiliumEndpointSlice) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Namespace != other.Namespace {
+ return false
+ }
+ if ((in.Endpoints != nil) && (other.Endpoints != nil)) || ((in.Endpoints == nil) != (other.Endpoints == nil)) {
+ in, other := &in.Endpoints, &other.Endpoints
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CiliumL2AnnouncementPolicy) DeepEqual(other *CiliumL2AnnouncementPolicy) bool {
+ if other == nil {
+ return false
+ }
+
+ if !in.Spec.DeepEqual(&other.Spec) {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CiliumL2AnnouncementPolicySpec) DeepEqual(other *CiliumL2AnnouncementPolicySpec) bool {
+ if other == nil {
+ return false
+ }
+
+ if (in.NodeSelector == nil) != (other.NodeSelector == nil) {
+ return false
+ } else if in.NodeSelector != nil {
+ if !in.NodeSelector.DeepEqual(other.NodeSelector) {
+ return false
+ }
+ }
+
+ if (in.ServiceSelector == nil) != (other.ServiceSelector == nil) {
+ return false
+ } else if in.ServiceSelector != nil {
+ if !in.ServiceSelector.DeepEqual(other.ServiceSelector) {
+ return false
+ }
+ }
+
+ if in.LoadBalancerIPs != other.LoadBalancerIPs {
+ return false
+ }
+ if in.ExternalIPs != other.ExternalIPs {
+ return false
+ }
+ if ((in.Interfaces != nil) && (other.Interfaces != nil)) || ((in.Interfaces == nil) != (other.Interfaces == nil)) {
+ in, other := &in.Interfaces, &other.Interfaces
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if inElement != (*other)[i] {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CiliumLoadBalancerIPPool) DeepEqual(other *CiliumLoadBalancerIPPool) bool {
+ if other == nil {
+ return false
+ }
+
+ if !in.Spec.DeepEqual(&other.Spec) {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CiliumLoadBalancerIPPoolCIDRBlock) DeepEqual(other *CiliumLoadBalancerIPPoolCIDRBlock) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Cidr != other.Cidr {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CiliumLoadBalancerIPPoolSpec) DeepEqual(other *CiliumLoadBalancerIPPoolSpec) bool {
+ if other == nil {
+ return false
+ }
+
+ if (in.ServiceSelector == nil) != (other.ServiceSelector == nil) {
+ return false
+ } else if in.ServiceSelector != nil {
+ if !in.ServiceSelector.DeepEqual(other.ServiceSelector) {
+ return false
+ }
+ }
+
+ if ((in.Cidrs != nil) && (other.Cidrs != nil)) || ((in.Cidrs == nil) != (other.Cidrs == nil)) {
+ in, other := &in.Cidrs, &other.Cidrs
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ if in.Disabled != other.Disabled {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CiliumPodIPPool) DeepEqual(other *CiliumPodIPPool) bool {
+ if other == nil {
+ return false
+ }
+
+ if !in.Spec.DeepEqual(&other.Spec) {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CoreCiliumEndpoint) DeepEqual(other *CoreCiliumEndpoint) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Name != other.Name {
+ return false
+ }
+ if in.IdentityID != other.IdentityID {
+ return false
+ }
+ if (in.Networking == nil) != (other.Networking == nil) {
+ return false
+ } else if in.Networking != nil {
+ if !in.Networking.DeepEqual(other.Networking) {
+ return false
+ }
+ }
+
+ if in.Encryption != other.Encryption {
+ return false
+ }
+
+ if ((in.NamedPorts != nil) && (other.NamedPorts != nil)) || ((in.NamedPorts == nil) != (other.NamedPorts == nil)) {
+ in, other := &in.NamedPorts, &other.NamedPorts
+ if other == nil || !in.DeepEqual(other) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *EgressRule) DeepEqual(other *EgressRule) bool {
+ if other == nil {
+ return false
+ }
+
+ if (in.NamespaceSelector == nil) != (other.NamespaceSelector == nil) {
+ return false
+ } else if in.NamespaceSelector != nil {
+ if !in.NamespaceSelector.DeepEqual(other.NamespaceSelector) {
+ return false
+ }
+ }
+
+ if (in.PodSelector == nil) != (other.PodSelector == nil) {
+ return false
+ } else if in.PodSelector != nil {
+ if !in.PodSelector.DeepEqual(other.PodSelector) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *IPPoolSpec) DeepEqual(other *IPPoolSpec) bool {
+ if other == nil {
+ return false
+ }
+
+ if (in.IPv4 == nil) != (other.IPv4 == nil) {
+ return false
+ } else if in.IPv4 != nil {
+ if !in.IPv4.DeepEqual(other.IPv4) {
+ return false
+ }
+ }
+
+ if (in.IPv6 == nil) != (other.IPv6 == nil) {
+ return false
+ } else if in.IPv6 != nil {
+ if !in.IPv6.DeepEqual(other.IPv6) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *IPv4PoolSpec) DeepEqual(other *IPv4PoolSpec) bool {
+ if other == nil {
+ return false
+ }
+
+ if ((in.CIDRs != nil) && (other.CIDRs != nil)) || ((in.CIDRs == nil) != (other.CIDRs == nil)) {
+ in, other := &in.CIDRs, &other.CIDRs
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if inElement != (*other)[i] {
+ return false
+ }
+ }
+ }
+ }
+
+ if in.MaskSize != other.MaskSize {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *IPv6PoolSpec) DeepEqual(other *IPv6PoolSpec) bool {
+ if other == nil {
+ return false
+ }
+
+ if ((in.CIDRs != nil) && (other.CIDRs != nil)) || ((in.CIDRs == nil) != (other.CIDRs == nil)) {
+ in, other := &in.CIDRs, &other.CIDRs
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if inElement != (*other)[i] {
+ return false
+ }
+ }
+ }
+ }
+
+ if in.MaskSize != other.MaskSize {
+ return false
+ }
+
+ return true
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/cell.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/cell.go
new file mode 100644
index 000000000..11de1e25f
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/cell.go
@@ -0,0 +1,521 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package client
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "net/http"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/sirupsen/logrus"
+ apiext_clientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
+ apiext_fake "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake"
+ "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ utilnet "k8s.io/apimachinery/pkg/util/net"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+ "k8s.io/apimachinery/pkg/util/wait"
+ "k8s.io/client-go/discovery"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/kubernetes/fake"
+ "k8s.io/client-go/rest"
+ "k8s.io/client-go/tools/clientcmd"
+ "k8s.io/client-go/util/connrotation"
+
+ "github.com/cilium/cilium/pkg/controller"
+ "github.com/cilium/cilium/pkg/hive"
+ "github.com/cilium/cilium/pkg/hive/cell"
+ cilium_clientset "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned"
+ cilium_fake "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/fake"
+ k8smetrics "github.com/cilium/cilium/pkg/k8s/metrics"
+ slim_apiextclientsetscheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/scheme"
+ slim_apiext_clientset "github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-clientset"
+ slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
+ slim_metav1beta1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1"
+ slim_clientset "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned"
+ slim_fake "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/fake"
+ k8sversion "github.com/cilium/cilium/pkg/k8s/version"
+ "github.com/cilium/cilium/pkg/logging"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+ "github.com/cilium/cilium/pkg/version"
+)
+
+// client.Cell provides Clientset, a composition of clientsets to Kubernetes resources
+// used by Cilium.
+var Cell = cell.Module(
+ "k8s-client",
+ "Kubernetes Client",
+
+ cell.Config(defaultConfig),
+ cell.Provide(newClientset),
+)
+
+var k8sHeartbeatControllerGroup = controller.NewGroup("k8s-heartbeat")
+
+// Type aliases for the clientsets to avoid name collision on 'Clientset' when composing them.
+type (
+ KubernetesClientset = kubernetes.Clientset
+ SlimClientset = slim_clientset.Clientset
+ APIExtClientset = slim_apiext_clientset.Clientset
+ CiliumClientset = cilium_clientset.Clientset
+)
+
+// Clientset is a composition of the different client sets used by Cilium.
+type Clientset interface {
+ kubernetes.Interface
+ apiext_clientset.Interface
+ cilium_clientset.Interface
+ Getters
+
+ // Slim returns the slim client, which contains some of the same APIs as the
+ // normal kubernetes client, but with slimmed down messages to reduce memory
+ // usage. Prefer the slim version when caching messages.
+ Slim() slim_clientset.Interface
+
+ // IsEnabled returns true if Kubernetes support is enabled and the
+ // clientset can be used.
+ IsEnabled() bool
+
+ // Disable disables the client. Panics if called after the clientset has been
+ // started.
+ Disable()
+
+ // Config returns the configuration used to create this client.
+ Config() Config
+
+ // RestConfig returns the deep copy of rest configuration.
+ RestConfig() *rest.Config
+}
+
+// compositeClientset implements the Clientset using real clients.
+type compositeClientset struct {
+	started  bool // set once onStart has completed successfully
+	disabled bool // set by Disable(); checked by IsEnabled()
+
+	*KubernetesClientset
+	*APIExtClientset
+	*CiliumClientset
+	clientsetGetters
+
+	controller    *controller.Manager // runs the k8s-heartbeat controller
+	slim          *SlimClientset
+	config        Config
+	log           logrus.FieldLogger
+	closeAllConns func() // closes all api-server connections to force reconnects
+	restConfig    *rest.Config
+}
+
+// newClientset constructs the composite clientset from the given
+// configuration. When Kubernetes support is disabled in cfg it returns a
+// disabled clientset immediately. Otherwise it builds a shared rest.Config
+// and HTTP client, derives the slim, apiext, kubernetes and cilium
+// clientsets from them, and registers start/stop hooks on the lifecycle.
+func newClientset(lc hive.Lifecycle, log logrus.FieldLogger, cfg Config) (Clientset, error) {
+	if !cfg.isEnabled() {
+		return &compositeClientset{disabled: true}, nil
+	}
+
+	// An api-server address without a scheme defaults to plain HTTP.
+	if cfg.K8sAPIServer != "" &&
+		!strings.HasPrefix(cfg.K8sAPIServer, "http") {
+		cfg.K8sAPIServer = "http://" + cfg.K8sAPIServer // default to HTTP
+	}
+
+	client := compositeClientset{
+		log:        log,
+		controller: controller.NewManager(),
+		config:     cfg,
+	}
+
+	restConfig, err := createConfig(cfg.K8sAPIServer, cfg.K8sKubeConfigPath, cfg.K8sClientQPS, cfg.K8sClientBurst)
+	if err != nil {
+		return nil, fmt.Errorf("unable to create k8s client rest configuration: %w", err)
+	}
+	client.restConfig = restConfig
+	defaultCloseAllConns := setDialer(cfg, restConfig)
+
+	// The HTTP client (and thus its transport) is shared by all clientsets
+	// built below.
+	httpClient, err := rest.HTTPClientFor(restConfig)
+	if err != nil {
+		return nil, fmt.Errorf("unable to create k8s REST client: %w", err)
+	}
+
+	// We are implementing the same logic as Kubelet, see
+	// https://github.com/kubernetes/kubernetes/blob/v1.24.0-beta.0/cmd/kubelet/app/server.go#L852.
+	if s := os.Getenv("DISABLE_HTTP2"); len(s) > 0 {
+		client.closeAllConns = defaultCloseAllConns
+	} else {
+		client.closeAllConns = func() {
+			utilnet.CloseIdleConnectionsFor(restConfig.Transport)
+		}
+	}
+
+	// Slim and K8s clients use protobuf marshalling.
+	restConfig.ContentConfig.ContentType = `application/vnd.kubernetes.protobuf`
+
+	client.slim, err = slim_clientset.NewForConfigAndClient(restConfig, httpClient)
+	if err != nil {
+		return nil, fmt.Errorf("unable to create slim k8s client: %w", err)
+	}
+
+	client.APIExtClientset, err = slim_apiext_clientset.NewForConfigAndClient(restConfig, httpClient)
+	if err != nil {
+		return nil, fmt.Errorf("unable to create apiext k8s client: %w", err)
+	}
+
+	client.KubernetesClientset, err = kubernetes.NewForConfigAndClient(restConfig, httpClient)
+	if err != nil {
+		return nil, fmt.Errorf("unable to create k8s client: %w", err)
+	}
+
+	client.clientsetGetters = clientsetGetters{&client}
+
+	// The cilium client uses JSON marshalling.
+	restConfig.ContentConfig.ContentType = `application/json`
+	client.CiliumClientset, err = cilium_clientset.NewForConfigAndClient(restConfig, httpClient)
+	if err != nil {
+		return nil, fmt.Errorf("unable to create cilium k8s client: %w", err)
+	}
+
+	lc.Append(hive.Hook{
+		OnStart: client.onStart,
+		OnStop:  client.onStop,
+	})
+
+	return &client, nil
+}
+
+// Slim returns the slim clientset, which uses reduced-memory API types.
+func (c *compositeClientset) Slim() slim_clientset.Interface {
+	return c.slim
+}
+
+// Discovery explicitly forwards to the Kubernetes clientset's Discovery,
+// resolving the ambiguity between the embedded clientsets.
+func (c *compositeClientset) Discovery() discovery.DiscoveryInterface {
+	return c.KubernetesClientset.Discovery()
+}
+
+// IsEnabled reports whether the clientset is usable: Kubernetes support is
+// configured on and Disable() has not been called. Safe on a nil receiver.
+func (c *compositeClientset) IsEnabled() bool {
+	return c != nil && c.config.isEnabled() && !c.disabled
+}
+
+// Disable marks the clientset unusable. Panics if the clientset has already
+// been started.
+func (c *compositeClientset) Disable() {
+	if c.started {
+		panic("Clientset.Disable() called after it had been started")
+	}
+	c.disabled = true
+}
+
+// Config returns the configuration the clientset was built from.
+func (c *compositeClientset) Config() Config {
+	return c.config
+}
+
+// RestConfig returns a deep copy of the underlying rest configuration.
+func (c *compositeClientset) RestConfig() *rest.Config {
+	return rest.CopyConfig(c.restConfig)
+}
+
+// onStart is the lifecycle start hook: it waits for connectivity to the
+// api-server, starts the heartbeat controller, and probes the server
+// version and capabilities. No-op when the clientset is disabled.
+func (c *compositeClientset) onStart(startCtx hive.HookContext) error {
+	if !c.IsEnabled() {
+		return nil
+	}
+
+	if err := c.waitForConn(startCtx); err != nil {
+		return err
+	}
+	c.startHeartbeat()
+
+	// Update the global K8s clients, K8s version and the capabilities.
+	if err := k8sversion.Update(c, c.config.EnableK8sAPIDiscovery); err != nil {
+		return err
+	}
+
+	if !k8sversion.Capabilities().MinimalVersionMet {
+		return fmt.Errorf("k8s version (%v) is not meeting the minimal requirement (%v)",
+			k8sversion.Version(), k8sversion.MinimalVersionConstraint)
+	}
+
+	// Only now is the clientset considered started (see Disable()).
+	c.started = true
+
+	return nil
+}
+
+// onStop is the lifecycle stop hook: it removes all heartbeat controllers,
+// waits for them to finish, closes all api-server connections, and marks
+// the clientset as no longer started.
+func (c *compositeClientset) onStop(stopCtx hive.HookContext) error {
+	if c.IsEnabled() {
+		c.controller.RemoveAllAndWait()
+		c.closeAllConns()
+	}
+	c.started = false
+	return nil
+}
+
+// startHeartbeat registers a periodic controller that probes kube-apiserver
+// liveness via a GET on /healthz every K8sHeartbeatTimeout; connection
+// recycling on failure is handled inside runHeartbeat. Disabled entirely
+// when the configured timeout is zero.
+func (c *compositeClientset) startHeartbeat() {
+	restClient := c.KubernetesClientset.RESTClient()
+
+	timeout := c.config.K8sHeartbeatTimeout
+	if timeout == 0 {
+		return
+	}
+
+	heartBeat := func(ctx context.Context) error {
+		// Kubernetes does a get node of the node that kubelet is running [0]. This seems excessive in
+		// our case because the amount of data transferred is bigger than doing a Get of /healthz.
+		// For this reason we have picked to perform a get on `/healthz` instead a get of a node.
+		//
+		// [0] https://github.com/kubernetes/kubernetes/blob/v1.17.3/pkg/kubelet/kubelet_node_status.go#L423
+		res := restClient.Get().Resource("healthz").Do(ctx)
+		return res.Error()
+	}
+
+	c.controller.UpdateController("k8s-heartbeat",
+		controller.ControllerParams{
+			Group: k8sHeartbeatControllerGroup,
+			DoFunc: func(context.Context) error {
+				runHeartbeat(
+					c.log,
+					heartBeat,
+					timeout,
+					c.closeAllConns,
+				)
+				return nil
+			},
+			RunInterval: timeout,
+		})
+}
+
+// createConfig creates a rest.Config for connecting to k8s api-server.
+//
+// The precedence of the configuration selection is the following:
+// 1. kubeCfgPath
+// 2. apiServerURL (https if specified)
+// 3. rest.InClusterConfig().
+func createConfig(apiServerURL, kubeCfgPath string, qps float32, burst int) (*rest.Config, error) {
+	var (
+		config *rest.Config
+		err    error
+	)
+	// Derive the user agent from the binary name, falling back to "cilium".
+	cmdName := "cilium"
+	if len(os.Args[0]) != 0 {
+		cmdName = filepath.Base(os.Args[0])
+	}
+	userAgent := fmt.Sprintf("%s/%s", cmdName, version.Version)
+
+	switch {
+	// If the apiServerURL and the kubeCfgPath are empty then we can try getting
+	// the rest.Config from the InClusterConfig
+	case apiServerURL == "" && kubeCfgPath == "":
+		if config, err = rest.InClusterConfig(); err != nil {
+			return nil, err
+		}
+	case kubeCfgPath != "":
+		if config, err = clientcmd.BuildConfigFromFlags("", kubeCfgPath); err != nil {
+			return nil, err
+		}
+	case strings.HasPrefix(apiServerURL, "https://"):
+		// Reuse the in-cluster credentials (CA bundle, bearer token) but
+		// point the client at the explicitly given HTTPS endpoint.
+		if config, err = rest.InClusterConfig(); err != nil {
+			return nil, err
+		}
+		config.Host = apiServerURL
+	default:
+		// Plain HTTP endpoint: no in-cluster credentials are attached.
+		config = &rest.Config{Host: apiServerURL, UserAgent: userAgent}
+	}
+
+	setConfig(config, userAgent, qps, burst)
+	return config, nil
+}
+
+// setConfig applies the user agent, QPS and burst overrides to config.
+// Zero-valued arguments leave the corresponding field untouched.
+func setConfig(config *rest.Config, userAgent string, qps float32, burst int) {
+	if userAgent != "" {
+		config.UserAgent = userAgent
+	}
+	if qps != 0.0 {
+		config.QPS = qps
+	}
+	if burst != 0 {
+		config.Burst = burst
+	}
+}
+
+// waitForConn polls the api-server every 5 seconds until it responds,
+// giving up after one minute or when ctx is cancelled. It returns the last
+// probe error on failure, or nil once connected.
+func (c *compositeClientset) waitForConn(ctx context.Context) error {
+	stop := make(chan struct{})
+	timeout := time.NewTimer(time.Minute)
+	defer timeout.Stop()
+	var err error
+	wait.Until(func() {
+		c.log.WithField("host", c.restConfig.Host).Info("Establishing connection to apiserver")
+		err = isConnReady(c)
+		if err == nil {
+			close(stop)
+			return
+		}
+
+		// Keep retrying (the default branch) until either the caller's
+		// context is done or the overall one-minute timer has fired; only
+		// then give up and stop the wait loop with the last error.
+		select {
+		case <-ctx.Done():
+		case <-timeout.C:
+		default:
+			return
+		}
+
+		c.log.WithError(err).WithField(logfields.IPAddr, c.restConfig.Host).Error("Unable to contact k8s api-server")
+		close(stop)
+	}, 5*time.Second, stop)
+	if err == nil {
+		c.log.Info("Connected to apiserver")
+	}
+	return err
+}
+
+// setDialer installs a rotatable dialer on restConfig so that all of its
+// connections can be closed at once, and returns that close-all function.
+// When the heartbeat is disabled (zero K8sHeartbeatTimeout) the default
+// dialer is kept and a no-op function is returned.
+func setDialer(cfg Config, restConfig *rest.Config) func() {
+	if cfg.K8sHeartbeatTimeout == 0 {
+		return func() {}
+	}
+	ctx := (&net.Dialer{
+		Timeout:   cfg.K8sHeartbeatTimeout,
+		KeepAlive: cfg.K8sHeartbeatTimeout,
+	}).DialContext
+	dialer := connrotation.NewDialer(ctx)
+	restConfig.Dial = dialer.DialContext
+	return dialer.CloseAll
+}
+
+// runHeartbeat performs one bounded heartbeat probe against kube-apiserver.
+// The probe is skipped when a successful k8s interaction was recorded within
+// the last 'timeout'. On a probe error (other than HTTP 429) or when the
+// probe does not complete within 'timeout', every closeAllConns function is
+// called to force fresh connections.
+func runHeartbeat(log logrus.FieldLogger, heartBeat func(context.Context) error, timeout time.Duration, closeAllConns ...func()) {
+	expireDate := time.Now().Add(-timeout)
+	// Don't even perform a health check if we have received a successful
+	// k8s event in the last 'timeout' duration
+	if k8smetrics.LastSuccessInteraction.Time().After(expireDate) {
+		return
+	}
+
+	done := make(chan error)
+	ctx, cancel := context.WithTimeout(context.Background(), timeout)
+	defer cancel()
+	go func() {
+		// If we have reached up to this point to perform a heartbeat to
+		// kube-apiserver then we should close the connections if we receive
+		// any error at all except if we receive a http.StatusTooManyRequests
+		// which means the server is overloaded and only for this reason we
+		// will not close all connections.
+		err := heartBeat(ctx)
+		switch t := err.(type) {
+		case *errors.StatusError:
+			if t.ErrStatus.Code != http.StatusTooManyRequests {
+				done <- err
+			}
+		default:
+			done <- err
+		}
+		// In the 429 case nothing was sent; closing 'done' makes the
+		// receive below yield a nil error, so connections stay open.
+		close(done)
+	}()
+
+	select {
+	case err := <-done:
+		if err != nil {
+			log.WithError(err).Warn("Network status error received, restarting client connections")
+			for _, fn := range closeAllConns {
+				fn()
+			}
+		}
+	case <-ctx.Done():
+		log.Warn("Heartbeat timed out, restarting client connections")
+		for _, fn := range closeAllConns {
+			fn()
+		}
+	}
+}
+
+// isConnReady probes api-server reachability by getting the kube-system
+// namespace and returns the resulting error (nil when reachable).
+func isConnReady(c kubernetes.Interface) error {
+	_, err := c.CoreV1().Namespaces().Get(context.TODO(), "kube-system", metav1.GetOptions{})
+	return err
+}
+
+// FakeClientCell provides a fake Clientset for use in tests.
+var FakeClientCell = cell.Provide(NewFakeClientset)
+
+// Type aliases for the fake clientsets to avoid name collision on
+// 'Clientset' when composing them.
+type (
+	KubernetesFakeClientset = fake.Clientset
+	SlimFakeClientset       = slim_fake.Clientset
+	CiliumFakeClientset     = cilium_fake.Clientset
+	APIExtFakeClientset     = apiext_fake.Clientset
+)
+
+// FakeClientset is a composition of the fake clientsets, implementing the
+// Clientset interface for tests.
+type FakeClientset struct {
+	disabled bool
+
+	*KubernetesFakeClientset
+	*CiliumFakeClientset
+	*APIExtFakeClientset
+	clientsetGetters
+
+	SlimFakeClientset *SlimFakeClientset
+
+	enabled bool
+}
+
+// Compile-time check that FakeClientset satisfies Clientset.
+var _ Clientset = &FakeClientset{}
+
+// Slim returns the slim fake clientset.
+func (c *FakeClientset) Slim() slim_clientset.Interface {
+	return c.SlimFakeClientset
+}
+
+// Discovery explicitly forwards to the Kubernetes fake clientset's
+// Discovery, resolving the ambiguity between the embedded clientsets.
+func (c *FakeClientset) Discovery() discovery.DiscoveryInterface {
+	return c.KubernetesFakeClientset.Discovery()
+}
+
+// IsEnabled reports whether Disable has not been called.
+func (c *FakeClientset) IsEnabled() bool {
+	return !c.disabled
+}
+
+// Disable marks the fake clientset as disabled.
+func (c *FakeClientset) Disable() {
+	c.disabled = true
+}
+
+// Config returns an empty configuration; fakes are not built from one.
+func (c *FakeClientset) Config() Config {
+	return Config{}
+}
+
+// RestConfig returns an empty rest configuration; fakes do not hold a real
+// api-server connection.
+func (c *FakeClientset) RestConfig() *rest.Config {
+	return &rest.Config{}
+}
+
+// NewFakeClientset constructs a FakeClientset backed by simple fake
+// clientsets, returned both as the concrete type and as Clientset.
+func NewFakeClientset() (*FakeClientset, Clientset) {
+	client := FakeClientset{
+		SlimFakeClientset:       slim_fake.NewSimpleClientset(),
+		CiliumFakeClientset:     cilium_fake.NewSimpleClientset(),
+		APIExtFakeClientset:     apiext_fake.NewSimpleClientset(),
+		KubernetesFakeClientset: fake.NewSimpleClientset(),
+		enabled:                 true,
+	}
+	client.clientsetGetters = clientsetGetters{&client}
+	return &client, &client
+}
+
+// standaloneLifecycle is a minimal lifecycle implementation that only
+// records appended hooks so NewStandaloneClientset can start them manually.
+type standaloneLifecycle struct {
+	hooks []hive.HookInterface
+}
+
+// Append records the hook; it is not started here.
+func (s *standaloneLifecycle) Append(hook hive.HookInterface) {
+	s.hooks = append(s.hooks, hook)
+}
+
+// NewStandaloneClientset creates a clientset outside hive. To be removed once
+// remaining uses of k8s.Init()/k8s.Client()/etc. have been converted.
+func NewStandaloneClientset(cfg Config) (Clientset, error) {
+	log := logging.DefaultLogger
+	lc := &standaloneLifecycle{}
+
+	clientset, err := newClientset(lc, log, cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	// Run the recorded start hooks immediately. Note that the matching stop
+	// hooks are never invoked for a standalone clientset.
+	for _, hook := range lc.hooks {
+		if err := hook.Start(context.Background()); err != nil {
+			return nil, err
+		}
+	}
+
+	return clientset, err
+}
+
+func init() {
+	// Register the metav1.Table and metav1.PartialObjectMetadata types with
+	// the slim apiext clientset scheme (presumably so such responses can be
+	// decoded by that clientset — confirm against the slim scheme package).
+	utilruntime.Must(slim_metav1.AddMetaToScheme(slim_apiextclientsetscheme.Scheme))
+	utilruntime.Must(slim_metav1beta1.AddMetaToScheme(slim_apiextclientsetscheme.Scheme))
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/clientset.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/clientset.go
new file mode 100644
index 000000000..720686010
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/clientset.go
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package versioned
+
+import (
+ "fmt"
+ "net/http"
+
+ ciliumv2 "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2"
+ ciliumv2alpha1 "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1"
+ discovery "k8s.io/client-go/discovery"
+ rest "k8s.io/client-go/rest"
+ flowcontrol "k8s.io/client-go/util/flowcontrol"
+)
+
+type Interface interface {
+ Discovery() discovery.DiscoveryInterface
+ CiliumV2() ciliumv2.CiliumV2Interface
+ CiliumV2alpha1() ciliumv2alpha1.CiliumV2alpha1Interface
+}
+
+// Clientset contains the clients for groups.
+type Clientset struct {
+ *discovery.DiscoveryClient
+ ciliumV2 *ciliumv2.CiliumV2Client
+ ciliumV2alpha1 *ciliumv2alpha1.CiliumV2alpha1Client
+}
+
+// CiliumV2 retrieves the CiliumV2Client
+func (c *Clientset) CiliumV2() ciliumv2.CiliumV2Interface {
+ return c.ciliumV2
+}
+
+// CiliumV2alpha1 retrieves the CiliumV2alpha1Client
+func (c *Clientset) CiliumV2alpha1() ciliumv2alpha1.CiliumV2alpha1Interface {
+ return c.ciliumV2alpha1
+}
+
+// Discovery retrieves the DiscoveryClient
+func (c *Clientset) Discovery() discovery.DiscoveryInterface {
+ if c == nil {
+ return nil
+ }
+ return c.DiscoveryClient
+}
+
+// NewForConfig creates a new Clientset for the given config.
+// If config's RateLimiter is not set and QPS and Burst are acceptable,
+// NewForConfig will generate a rate-limiter in configShallowCopy.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
+func NewForConfig(c *rest.Config) (*Clientset, error) {
+ configShallowCopy := *c
+
+ if configShallowCopy.UserAgent == "" {
+ configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ // share the transport between all clients
+ httpClient, err := rest.HTTPClientFor(&configShallowCopy)
+ if err != nil {
+ return nil, err
+ }
+
+ return NewForConfigAndClient(&configShallowCopy, httpClient)
+}
+
+// NewForConfigAndClient creates a new Clientset for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+// If config's RateLimiter is not set and QPS and Burst are acceptable,
+// NewForConfigAndClient will generate a rate-limiter in configShallowCopy.
+func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) {
+ configShallowCopy := *c
+ if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
+ if configShallowCopy.Burst <= 0 {
+ return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0")
+ }
+ configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
+ }
+
+ var cs Clientset
+ var err error
+ cs.ciliumV2, err = ciliumv2.NewForConfigAndClient(&configShallowCopy, httpClient)
+ if err != nil {
+ return nil, err
+ }
+ cs.ciliumV2alpha1, err = ciliumv2alpha1.NewForConfigAndClient(&configShallowCopy, httpClient)
+ if err != nil {
+ return nil, err
+ }
+
+ cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient)
+ if err != nil {
+ return nil, err
+ }
+ return &cs, nil
+}
+
+// NewForConfigOrDie creates a new Clientset for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *Clientset {
+ cs, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return cs
+}
+
+// New creates a new Clientset for the given RESTClient.
+func New(c rest.Interface) *Clientset {
+ var cs Clientset
+ cs.ciliumV2 = ciliumv2.New(c)
+ cs.ciliumV2alpha1 = ciliumv2alpha1.New(c)
+
+ cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
+ return &cs
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/fake/clientset_generated.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/fake/clientset_generated.go
new file mode 100644
index 000000000..f39a5d25e
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/fake/clientset_generated.go
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ clientset "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned"
+ ciliumv2 "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2"
+ fakeciliumv2 "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake"
+ ciliumv2alpha1 "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1"
+ fakeciliumv2alpha1 "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/watch"
+ "k8s.io/client-go/discovery"
+ fakediscovery "k8s.io/client-go/discovery/fake"
+ "k8s.io/client-go/testing"
+)
+
+// NewSimpleClientset returns a clientset that will respond with the provided objects.
+// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
+// without applying any validations and/or defaults. It shouldn't be considered a replacement
+// for a real clientset and is mostly useful in simple unit tests.
+func NewSimpleClientset(objects ...runtime.Object) *Clientset {
+ o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder())
+ for _, obj := range objects {
+ if err := o.Add(obj); err != nil {
+ panic(err)
+ }
+ }
+
+ cs := &Clientset{tracker: o}
+ cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake}
+ cs.AddReactor("*", "*", testing.ObjectReaction(o))
+ cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) {
+ gvr := action.GetResource()
+ ns := action.GetNamespace()
+ watch, err := o.Watch(gvr, ns)
+ if err != nil {
+ return false, nil, err
+ }
+ return true, watch, nil
+ })
+
+ return cs
+}
+
+// Clientset implements clientset.Interface. Meant to be embedded into a
+// struct to get a default implementation. This makes faking out just the method
+// you want to test easier.
+type Clientset struct {
+ testing.Fake
+ discovery *fakediscovery.FakeDiscovery
+ tracker testing.ObjectTracker
+}
+
+func (c *Clientset) Discovery() discovery.DiscoveryInterface {
+ return c.discovery
+}
+
+func (c *Clientset) Tracker() testing.ObjectTracker {
+ return c.tracker
+}
+
+var (
+ _ clientset.Interface = &Clientset{}
+ _ testing.FakeClient = &Clientset{}
+)
+
+// CiliumV2 retrieves the CiliumV2Client
+func (c *Clientset) CiliumV2() ciliumv2.CiliumV2Interface {
+ return &fakeciliumv2.FakeCiliumV2{Fake: &c.Fake}
+}
+
+// CiliumV2alpha1 retrieves the CiliumV2alpha1Client
+func (c *Clientset) CiliumV2alpha1() ciliumv2alpha1.CiliumV2alpha1Interface {
+ return &fakeciliumv2alpha1.FakeCiliumV2alpha1{Fake: &c.Fake}
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/fake/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/fake/doc.go
new file mode 100644
index 000000000..1da8cc9d5
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/fake/doc.go
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated fake clientset.
+package fake
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/fake/register.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/fake/register.go
new file mode 100644
index 000000000..22b7a01cc
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/fake/register.go
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ ciliumv2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
+ ciliumv2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+var scheme = runtime.NewScheme()
+var codecs = serializer.NewCodecFactory(scheme)
+
+var localSchemeBuilder = runtime.SchemeBuilder{
+ ciliumv2.AddToScheme,
+ ciliumv2alpha1.AddToScheme,
+}
+
+// AddToScheme adds all types of this clientset into the given scheme. This allows composition
+// of clientsets, like in:
+//
+// import (
+// "k8s.io/client-go/kubernetes"
+// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
+// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+// )
+//
+// kclientset, _ := kubernetes.NewForConfig(c)
+// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+//
+// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
+// correctly.
+var AddToScheme = localSchemeBuilder.AddToScheme
+
+func init() {
+ v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
+ utilruntime.Must(AddToScheme(scheme))
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme/doc.go
new file mode 100644
index 000000000..ba3451535
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme/doc.go
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package contains the scheme of the automatically generated clientset.
+package scheme
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme/register.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme/register.go
new file mode 100644
index 000000000..59689a040
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme/register.go
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package scheme
+
+import (
+ ciliumv2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
+ ciliumv2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+var Scheme = runtime.NewScheme()
+var Codecs = serializer.NewCodecFactory(Scheme)
+var ParameterCodec = runtime.NewParameterCodec(Scheme)
+var localSchemeBuilder = runtime.SchemeBuilder{
+ ciliumv2.AddToScheme,
+ ciliumv2alpha1.AddToScheme,
+}
+
+// AddToScheme adds all types of this clientset into the given scheme. This allows composition
+// of clientsets, like in:
+//
+// import (
+// "k8s.io/client-go/kubernetes"
+// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
+// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+// )
+//
+// kclientset, _ := kubernetes.NewForConfig(c)
+// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+//
+// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
+// correctly.
+var AddToScheme = localSchemeBuilder.AddToScheme
+
+func init() {
+ v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
+ utilruntime.Must(AddToScheme(Scheme))
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/cilium.io_client.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/cilium.io_client.go
new file mode 100644
index 000000000..39002ac0d
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/cilium.io_client.go
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v2
+
+import (
+ "net/http"
+
+ v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
+ "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
+ rest "k8s.io/client-go/rest"
+)
+
+type CiliumV2Interface interface {
+ RESTClient() rest.Interface
+ CiliumClusterwideEnvoyConfigsGetter
+ CiliumClusterwideNetworkPoliciesGetter
+ CiliumEgressGatewayPoliciesGetter
+ CiliumEndpointsGetter
+ CiliumEnvoyConfigsGetter
+ CiliumExternalWorkloadsGetter
+ CiliumIdentitiesGetter
+ CiliumLocalRedirectPoliciesGetter
+ CiliumNetworkPoliciesGetter
+ CiliumNodesGetter
+}
+
+// CiliumV2Client is used to interact with features provided by the cilium.io group.
+type CiliumV2Client struct {
+ restClient rest.Interface
+}
+
+func (c *CiliumV2Client) CiliumClusterwideEnvoyConfigs() CiliumClusterwideEnvoyConfigInterface {
+ return newCiliumClusterwideEnvoyConfigs(c)
+}
+
+func (c *CiliumV2Client) CiliumClusterwideNetworkPolicies() CiliumClusterwideNetworkPolicyInterface {
+ return newCiliumClusterwideNetworkPolicies(c)
+}
+
+func (c *CiliumV2Client) CiliumEgressGatewayPolicies() CiliumEgressGatewayPolicyInterface {
+ return newCiliumEgressGatewayPolicies(c)
+}
+
+func (c *CiliumV2Client) CiliumEndpoints(namespace string) CiliumEndpointInterface {
+ return newCiliumEndpoints(c, namespace)
+}
+
+func (c *CiliumV2Client) CiliumEnvoyConfigs(namespace string) CiliumEnvoyConfigInterface {
+ return newCiliumEnvoyConfigs(c, namespace)
+}
+
+func (c *CiliumV2Client) CiliumExternalWorkloads() CiliumExternalWorkloadInterface {
+ return newCiliumExternalWorkloads(c)
+}
+
+func (c *CiliumV2Client) CiliumIdentities() CiliumIdentityInterface {
+ return newCiliumIdentities(c)
+}
+
+func (c *CiliumV2Client) CiliumLocalRedirectPolicies(namespace string) CiliumLocalRedirectPolicyInterface {
+ return newCiliumLocalRedirectPolicies(c, namespace)
+}
+
+func (c *CiliumV2Client) CiliumNetworkPolicies(namespace string) CiliumNetworkPolicyInterface {
+ return newCiliumNetworkPolicies(c, namespace)
+}
+
+func (c *CiliumV2Client) CiliumNodes() CiliumNodeInterface {
+ return newCiliumNodes(c)
+}
+
+// NewForConfig creates a new CiliumV2Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
+func NewForConfig(c *rest.Config) (*CiliumV2Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ httpClient, err := rest.HTTPClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new CiliumV2Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CiliumV2Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := rest.RESTClientForConfigAndClient(&config, h)
+ if err != nil {
+ return nil, err
+ }
+ return &CiliumV2Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new CiliumV2Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *CiliumV2Client {
+ client, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+// New creates a new CiliumV2Client for the given RESTClient.
+func New(c rest.Interface) *CiliumV2Client {
+ return &CiliumV2Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+ gv := v2.SchemeGroupVersion
+ config.GroupVersion = &gv
+ config.APIPath = "/apis"
+ config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *CiliumV2Client) RESTClient() rest.Interface {
+ if c == nil {
+ return nil
+ }
+ return c.restClient
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumclusterwideenvoyconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumclusterwideenvoyconfig.go
new file mode 100644
index 000000000..cf8eecec3
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumclusterwideenvoyconfig.go
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v2
+
+import (
+ "context"
+ "time"
+
+ v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
+ scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// CiliumClusterwideEnvoyConfigsGetter has a method to return a CiliumClusterwideEnvoyConfigInterface.
+// A group's client should implement this interface.
+type CiliumClusterwideEnvoyConfigsGetter interface {
+ CiliumClusterwideEnvoyConfigs() CiliumClusterwideEnvoyConfigInterface
+}
+
+// CiliumClusterwideEnvoyConfigInterface has methods to work with CiliumClusterwideEnvoyConfig resources.
+type CiliumClusterwideEnvoyConfigInterface interface {
+ Create(ctx context.Context, ciliumClusterwideEnvoyConfig *v2.CiliumClusterwideEnvoyConfig, opts v1.CreateOptions) (*v2.CiliumClusterwideEnvoyConfig, error)
+ Update(ctx context.Context, ciliumClusterwideEnvoyConfig *v2.CiliumClusterwideEnvoyConfig, opts v1.UpdateOptions) (*v2.CiliumClusterwideEnvoyConfig, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v2.CiliumClusterwideEnvoyConfig, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v2.CiliumClusterwideEnvoyConfigList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumClusterwideEnvoyConfig, err error)
+ CiliumClusterwideEnvoyConfigExpansion
+}
+
+// ciliumClusterwideEnvoyConfigs implements CiliumClusterwideEnvoyConfigInterface
+type ciliumClusterwideEnvoyConfigs struct {
+ client rest.Interface
+}
+
+// newCiliumClusterwideEnvoyConfigs returns a CiliumClusterwideEnvoyConfigs
+func newCiliumClusterwideEnvoyConfigs(c *CiliumV2Client) *ciliumClusterwideEnvoyConfigs {
+ return &ciliumClusterwideEnvoyConfigs{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the ciliumClusterwideEnvoyConfig, and returns the corresponding ciliumClusterwideEnvoyConfig object, and an error if there is any.
+func (c *ciliumClusterwideEnvoyConfigs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumClusterwideEnvoyConfig, err error) {
+ result = &v2.CiliumClusterwideEnvoyConfig{}
+ err = c.client.Get().
+ Resource("ciliumclusterwideenvoyconfigs").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of CiliumClusterwideEnvoyConfigs that match those selectors.
+func (c *ciliumClusterwideEnvoyConfigs) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumClusterwideEnvoyConfigList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v2.CiliumClusterwideEnvoyConfigList{}
+ err = c.client.Get().
+ Resource("ciliumclusterwideenvoyconfigs").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested ciliumClusterwideEnvoyConfigs.
+func (c *ciliumClusterwideEnvoyConfigs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("ciliumclusterwideenvoyconfigs").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a ciliumClusterwideEnvoyConfig and creates it. Returns the server's representation of the ciliumClusterwideEnvoyConfig, and an error, if there is any.
+func (c *ciliumClusterwideEnvoyConfigs) Create(ctx context.Context, ciliumClusterwideEnvoyConfig *v2.CiliumClusterwideEnvoyConfig, opts v1.CreateOptions) (result *v2.CiliumClusterwideEnvoyConfig, err error) {
+ result = &v2.CiliumClusterwideEnvoyConfig{}
+ err = c.client.Post().
+ Resource("ciliumclusterwideenvoyconfigs").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumClusterwideEnvoyConfig).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a ciliumClusterwideEnvoyConfig and updates it. Returns the server's representation of the ciliumClusterwideEnvoyConfig, and an error, if there is any.
+func (c *ciliumClusterwideEnvoyConfigs) Update(ctx context.Context, ciliumClusterwideEnvoyConfig *v2.CiliumClusterwideEnvoyConfig, opts v1.UpdateOptions) (result *v2.CiliumClusterwideEnvoyConfig, err error) {
+ result = &v2.CiliumClusterwideEnvoyConfig{}
+ err = c.client.Put().
+ Resource("ciliumclusterwideenvoyconfigs").
+ Name(ciliumClusterwideEnvoyConfig.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumClusterwideEnvoyConfig).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the ciliumClusterwideEnvoyConfig and deletes it. Returns an error if one occurs.
+func (c *ciliumClusterwideEnvoyConfigs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("ciliumclusterwideenvoyconfigs").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *ciliumClusterwideEnvoyConfigs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("ciliumclusterwideenvoyconfigs").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched ciliumClusterwideEnvoyConfig.
+func (c *ciliumClusterwideEnvoyConfigs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumClusterwideEnvoyConfig, err error) {
+ result = &v2.CiliumClusterwideEnvoyConfig{}
+ err = c.client.Patch(pt).
+ Resource("ciliumclusterwideenvoyconfigs").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumclusterwidenetworkpolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumclusterwidenetworkpolicy.go
new file mode 100644
index 000000000..56cde2626
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumclusterwidenetworkpolicy.go
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v2
+
+import (
+ "context"
+ "time"
+
+ v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
+ scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// CiliumClusterwideNetworkPoliciesGetter has a method to return a CiliumClusterwideNetworkPolicyInterface.
+// A group's client should implement this interface.
+type CiliumClusterwideNetworkPoliciesGetter interface {
+ CiliumClusterwideNetworkPolicies() CiliumClusterwideNetworkPolicyInterface
+}
+
+// CiliumClusterwideNetworkPolicyInterface has methods to work with CiliumClusterwideNetworkPolicy resources.
+type CiliumClusterwideNetworkPolicyInterface interface {
+ Create(ctx context.Context, ciliumClusterwideNetworkPolicy *v2.CiliumClusterwideNetworkPolicy, opts v1.CreateOptions) (*v2.CiliumClusterwideNetworkPolicy, error)
+ Update(ctx context.Context, ciliumClusterwideNetworkPolicy *v2.CiliumClusterwideNetworkPolicy, opts v1.UpdateOptions) (*v2.CiliumClusterwideNetworkPolicy, error)
+ UpdateStatus(ctx context.Context, ciliumClusterwideNetworkPolicy *v2.CiliumClusterwideNetworkPolicy, opts v1.UpdateOptions) (*v2.CiliumClusterwideNetworkPolicy, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v2.CiliumClusterwideNetworkPolicy, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v2.CiliumClusterwideNetworkPolicyList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumClusterwideNetworkPolicy, err error)
+ CiliumClusterwideNetworkPolicyExpansion
+}
+
+// ciliumClusterwideNetworkPolicies implements CiliumClusterwideNetworkPolicyInterface
+type ciliumClusterwideNetworkPolicies struct {
+ client rest.Interface
+}
+
+// newCiliumClusterwideNetworkPolicies returns a CiliumClusterwideNetworkPolicies
+func newCiliumClusterwideNetworkPolicies(c *CiliumV2Client) *ciliumClusterwideNetworkPolicies {
+ return &ciliumClusterwideNetworkPolicies{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the ciliumClusterwideNetworkPolicy, and returns the corresponding ciliumClusterwideNetworkPolicy object, and an error if there is any.
+func (c *ciliumClusterwideNetworkPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumClusterwideNetworkPolicy, err error) {
+ result = &v2.CiliumClusterwideNetworkPolicy{}
+ err = c.client.Get().
+ Resource("ciliumclusterwidenetworkpolicies").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of CiliumClusterwideNetworkPolicies that match those selectors.
+func (c *ciliumClusterwideNetworkPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumClusterwideNetworkPolicyList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v2.CiliumClusterwideNetworkPolicyList{}
+ err = c.client.Get().
+ Resource("ciliumclusterwidenetworkpolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested ciliumClusterwideNetworkPolicies.
+func (c *ciliumClusterwideNetworkPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("ciliumclusterwidenetworkpolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a ciliumClusterwideNetworkPolicy and creates it. Returns the server's representation of the ciliumClusterwideNetworkPolicy, and an error, if there is any.
+func (c *ciliumClusterwideNetworkPolicies) Create(ctx context.Context, ciliumClusterwideNetworkPolicy *v2.CiliumClusterwideNetworkPolicy, opts v1.CreateOptions) (result *v2.CiliumClusterwideNetworkPolicy, err error) {
+ result = &v2.CiliumClusterwideNetworkPolicy{}
+ err = c.client.Post().
+ Resource("ciliumclusterwidenetworkpolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumClusterwideNetworkPolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a ciliumClusterwideNetworkPolicy and updates it. Returns the server's representation of the ciliumClusterwideNetworkPolicy, and an error, if there is any.
+func (c *ciliumClusterwideNetworkPolicies) Update(ctx context.Context, ciliumClusterwideNetworkPolicy *v2.CiliumClusterwideNetworkPolicy, opts v1.UpdateOptions) (result *v2.CiliumClusterwideNetworkPolicy, err error) {
+ result = &v2.CiliumClusterwideNetworkPolicy{}
+ err = c.client.Put().
+ Resource("ciliumclusterwidenetworkpolicies").
+ Name(ciliumClusterwideNetworkPolicy.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumClusterwideNetworkPolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *ciliumClusterwideNetworkPolicies) UpdateStatus(ctx context.Context, ciliumClusterwideNetworkPolicy *v2.CiliumClusterwideNetworkPolicy, opts v1.UpdateOptions) (result *v2.CiliumClusterwideNetworkPolicy, err error) {
+ result = &v2.CiliumClusterwideNetworkPolicy{}
+ err = c.client.Put().
+ Resource("ciliumclusterwidenetworkpolicies").
+ Name(ciliumClusterwideNetworkPolicy.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumClusterwideNetworkPolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the ciliumClusterwideNetworkPolicy and deletes it. Returns an error if one occurs.
+func (c *ciliumClusterwideNetworkPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("ciliumclusterwidenetworkpolicies").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *ciliumClusterwideNetworkPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("ciliumclusterwidenetworkpolicies").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched ciliumClusterwideNetworkPolicy.
+func (c *ciliumClusterwideNetworkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumClusterwideNetworkPolicy, err error) {
+ result = &v2.CiliumClusterwideNetworkPolicy{}
+ err = c.client.Patch(pt).
+ Resource("ciliumclusterwidenetworkpolicies").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumegressgatewaypolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumegressgatewaypolicy.go
new file mode 100644
index 000000000..625a89b74
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumegressgatewaypolicy.go
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v2
+
+import (
+ "context"
+ "time"
+
+ v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
+ scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// CiliumEgressGatewayPoliciesGetter has a method to return a CiliumEgressGatewayPolicyInterface.
+// A group's client should implement this interface.
+type CiliumEgressGatewayPoliciesGetter interface {
+ CiliumEgressGatewayPolicies() CiliumEgressGatewayPolicyInterface
+}
+
+// CiliumEgressGatewayPolicyInterface has methods to work with CiliumEgressGatewayPolicy resources.
+type CiliumEgressGatewayPolicyInterface interface {
+ Create(ctx context.Context, ciliumEgressGatewayPolicy *v2.CiliumEgressGatewayPolicy, opts v1.CreateOptions) (*v2.CiliumEgressGatewayPolicy, error)
+ Update(ctx context.Context, ciliumEgressGatewayPolicy *v2.CiliumEgressGatewayPolicy, opts v1.UpdateOptions) (*v2.CiliumEgressGatewayPolicy, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v2.CiliumEgressGatewayPolicy, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v2.CiliumEgressGatewayPolicyList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumEgressGatewayPolicy, err error)
+ CiliumEgressGatewayPolicyExpansion
+}
+
+// ciliumEgressGatewayPolicies implements CiliumEgressGatewayPolicyInterface
+type ciliumEgressGatewayPolicies struct {
+ client rest.Interface
+}
+
+// newCiliumEgressGatewayPolicies returns a CiliumEgressGatewayPolicies
+func newCiliumEgressGatewayPolicies(c *CiliumV2Client) *ciliumEgressGatewayPolicies {
+ return &ciliumEgressGatewayPolicies{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the ciliumEgressGatewayPolicy, and returns the corresponding ciliumEgressGatewayPolicy object, and an error if there is any.
+func (c *ciliumEgressGatewayPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumEgressGatewayPolicy, err error) {
+ result = &v2.CiliumEgressGatewayPolicy{}
+ err = c.client.Get().
+ Resource("ciliumegressgatewaypolicies").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of CiliumEgressGatewayPolicies that match those selectors.
+func (c *ciliumEgressGatewayPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumEgressGatewayPolicyList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v2.CiliumEgressGatewayPolicyList{}
+ err = c.client.Get().
+ Resource("ciliumegressgatewaypolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested ciliumEgressGatewayPolicies.
+func (c *ciliumEgressGatewayPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("ciliumegressgatewaypolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a ciliumEgressGatewayPolicy and creates it. Returns the server's representation of the ciliumEgressGatewayPolicy, and an error, if there is any.
+func (c *ciliumEgressGatewayPolicies) Create(ctx context.Context, ciliumEgressGatewayPolicy *v2.CiliumEgressGatewayPolicy, opts v1.CreateOptions) (result *v2.CiliumEgressGatewayPolicy, err error) {
+ result = &v2.CiliumEgressGatewayPolicy{}
+ err = c.client.Post().
+ Resource("ciliumegressgatewaypolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumEgressGatewayPolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a ciliumEgressGatewayPolicy and updates it. Returns the server's representation of the ciliumEgressGatewayPolicy, and an error, if there is any.
+func (c *ciliumEgressGatewayPolicies) Update(ctx context.Context, ciliumEgressGatewayPolicy *v2.CiliumEgressGatewayPolicy, opts v1.UpdateOptions) (result *v2.CiliumEgressGatewayPolicy, err error) {
+ result = &v2.CiliumEgressGatewayPolicy{}
+ err = c.client.Put().
+ Resource("ciliumegressgatewaypolicies").
+ Name(ciliumEgressGatewayPolicy.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumEgressGatewayPolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the ciliumEgressGatewayPolicy and deletes it. Returns an error if one occurs.
+func (c *ciliumEgressGatewayPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("ciliumegressgatewaypolicies").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *ciliumEgressGatewayPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("ciliumegressgatewaypolicies").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched ciliumEgressGatewayPolicy.
+func (c *ciliumEgressGatewayPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumEgressGatewayPolicy, err error) {
+ result = &v2.CiliumEgressGatewayPolicy{}
+ err = c.client.Patch(pt).
+ Resource("ciliumegressgatewaypolicies").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumendpoint.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumendpoint.go
new file mode 100644
index 000000000..3cd66d46a
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumendpoint.go
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v2
+
+import (
+ "context"
+ "time"
+
+ v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
+ scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// CiliumEndpointsGetter has a method to return a CiliumEndpointInterface.
+// A group's client should implement this interface.
+type CiliumEndpointsGetter interface {
+ CiliumEndpoints(namespace string) CiliumEndpointInterface
+}
+
+// CiliumEndpointInterface has methods to work with CiliumEndpoint resources.
+type CiliumEndpointInterface interface {
+ Create(ctx context.Context, ciliumEndpoint *v2.CiliumEndpoint, opts v1.CreateOptions) (*v2.CiliumEndpoint, error)
+ Update(ctx context.Context, ciliumEndpoint *v2.CiliumEndpoint, opts v1.UpdateOptions) (*v2.CiliumEndpoint, error)
+ UpdateStatus(ctx context.Context, ciliumEndpoint *v2.CiliumEndpoint, opts v1.UpdateOptions) (*v2.CiliumEndpoint, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v2.CiliumEndpoint, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v2.CiliumEndpointList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumEndpoint, err error)
+ CiliumEndpointExpansion
+}
+
+// ciliumEndpoints implements CiliumEndpointInterface
+type ciliumEndpoints struct {
+ client rest.Interface
+ ns string
+}
+
+// newCiliumEndpoints returns a CiliumEndpoints
+func newCiliumEndpoints(c *CiliumV2Client, namespace string) *ciliumEndpoints {
+ return &ciliumEndpoints{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the ciliumEndpoint, and returns the corresponding ciliumEndpoint object, and an error if there is any.
+func (c *ciliumEndpoints) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumEndpoint, err error) {
+ result = &v2.CiliumEndpoint{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("ciliumendpoints").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of CiliumEndpoints that match those selectors.
+func (c *ciliumEndpoints) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumEndpointList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v2.CiliumEndpointList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("ciliumendpoints").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested ciliumEndpoints.
+func (c *ciliumEndpoints) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("ciliumendpoints").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a ciliumEndpoint and creates it. Returns the server's representation of the ciliumEndpoint, and an error, if there is any.
+func (c *ciliumEndpoints) Create(ctx context.Context, ciliumEndpoint *v2.CiliumEndpoint, opts v1.CreateOptions) (result *v2.CiliumEndpoint, err error) {
+ result = &v2.CiliumEndpoint{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("ciliumendpoints").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumEndpoint).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a ciliumEndpoint and updates it. Returns the server's representation of the ciliumEndpoint, and an error, if there is any.
+func (c *ciliumEndpoints) Update(ctx context.Context, ciliumEndpoint *v2.CiliumEndpoint, opts v1.UpdateOptions) (result *v2.CiliumEndpoint, err error) {
+ result = &v2.CiliumEndpoint{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("ciliumendpoints").
+ Name(ciliumEndpoint.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumEndpoint).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *ciliumEndpoints) UpdateStatus(ctx context.Context, ciliumEndpoint *v2.CiliumEndpoint, opts v1.UpdateOptions) (result *v2.CiliumEndpoint, err error) {
+ result = &v2.CiliumEndpoint{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("ciliumendpoints").
+ Name(ciliumEndpoint.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumEndpoint).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the ciliumEndpoint and deletes it. Returns an error if one occurs.
+func (c *ciliumEndpoints) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("ciliumendpoints").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *ciliumEndpoints) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("ciliumendpoints").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched ciliumEndpoint.
+func (c *ciliumEndpoints) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumEndpoint, err error) {
+ result = &v2.CiliumEndpoint{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("ciliumendpoints").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumenvoyconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumenvoyconfig.go
new file mode 100644
index 000000000..9f5acd456
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumenvoyconfig.go
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v2
+
+import (
+ "context"
+ "time"
+
+ v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
+ scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// CiliumEnvoyConfigsGetter has a method to return a CiliumEnvoyConfigInterface.
+// A group's client should implement this interface.
+type CiliumEnvoyConfigsGetter interface {
+ CiliumEnvoyConfigs(namespace string) CiliumEnvoyConfigInterface
+}
+
+// CiliumEnvoyConfigInterface has methods to work with CiliumEnvoyConfig resources.
+type CiliumEnvoyConfigInterface interface {
+ Create(ctx context.Context, ciliumEnvoyConfig *v2.CiliumEnvoyConfig, opts v1.CreateOptions) (*v2.CiliumEnvoyConfig, error)
+ Update(ctx context.Context, ciliumEnvoyConfig *v2.CiliumEnvoyConfig, opts v1.UpdateOptions) (*v2.CiliumEnvoyConfig, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v2.CiliumEnvoyConfig, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v2.CiliumEnvoyConfigList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumEnvoyConfig, err error)
+ CiliumEnvoyConfigExpansion
+}
+
+// ciliumEnvoyConfigs implements CiliumEnvoyConfigInterface
+type ciliumEnvoyConfigs struct {
+ client rest.Interface
+ ns string
+}
+
+// newCiliumEnvoyConfigs returns a CiliumEnvoyConfigs
+func newCiliumEnvoyConfigs(c *CiliumV2Client, namespace string) *ciliumEnvoyConfigs {
+ return &ciliumEnvoyConfigs{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the ciliumEnvoyConfig, and returns the corresponding ciliumEnvoyConfig object, and an error if there is any.
+func (c *ciliumEnvoyConfigs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumEnvoyConfig, err error) {
+ result = &v2.CiliumEnvoyConfig{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("ciliumenvoyconfigs").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of CiliumEnvoyConfigs that match those selectors.
+func (c *ciliumEnvoyConfigs) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumEnvoyConfigList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v2.CiliumEnvoyConfigList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("ciliumenvoyconfigs").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested ciliumEnvoyConfigs.
+func (c *ciliumEnvoyConfigs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("ciliumenvoyconfigs").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a ciliumEnvoyConfig and creates it. Returns the server's representation of the ciliumEnvoyConfig, and an error, if there is any.
+func (c *ciliumEnvoyConfigs) Create(ctx context.Context, ciliumEnvoyConfig *v2.CiliumEnvoyConfig, opts v1.CreateOptions) (result *v2.CiliumEnvoyConfig, err error) {
+ result = &v2.CiliumEnvoyConfig{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("ciliumenvoyconfigs").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumEnvoyConfig).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a ciliumEnvoyConfig and updates it. Returns the server's representation of the ciliumEnvoyConfig, and an error, if there is any.
+func (c *ciliumEnvoyConfigs) Update(ctx context.Context, ciliumEnvoyConfig *v2.CiliumEnvoyConfig, opts v1.UpdateOptions) (result *v2.CiliumEnvoyConfig, err error) {
+ result = &v2.CiliumEnvoyConfig{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("ciliumenvoyconfigs").
+ Name(ciliumEnvoyConfig.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumEnvoyConfig).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the ciliumEnvoyConfig and deletes it. Returns an error if one occurs.
+func (c *ciliumEnvoyConfigs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("ciliumenvoyconfigs").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *ciliumEnvoyConfigs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("ciliumenvoyconfigs").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched ciliumEnvoyConfig.
+func (c *ciliumEnvoyConfigs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumEnvoyConfig, err error) {
+ result = &v2.CiliumEnvoyConfig{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("ciliumenvoyconfigs").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumexternalworkload.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumexternalworkload.go
new file mode 100644
index 000000000..261cbfeed
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumexternalworkload.go
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v2
+
+import (
+ "context"
+ "time"
+
+ v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
+ scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// CiliumExternalWorkloadsGetter has a method to return a CiliumExternalWorkloadInterface.
+// A group's client should implement this interface.
+type CiliumExternalWorkloadsGetter interface {
+ CiliumExternalWorkloads() CiliumExternalWorkloadInterface
+}
+
+// CiliumExternalWorkloadInterface has methods to work with CiliumExternalWorkload resources.
+type CiliumExternalWorkloadInterface interface {
+ Create(ctx context.Context, ciliumExternalWorkload *v2.CiliumExternalWorkload, opts v1.CreateOptions) (*v2.CiliumExternalWorkload, error)
+ Update(ctx context.Context, ciliumExternalWorkload *v2.CiliumExternalWorkload, opts v1.UpdateOptions) (*v2.CiliumExternalWorkload, error)
+ UpdateStatus(ctx context.Context, ciliumExternalWorkload *v2.CiliumExternalWorkload, opts v1.UpdateOptions) (*v2.CiliumExternalWorkload, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v2.CiliumExternalWorkload, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v2.CiliumExternalWorkloadList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumExternalWorkload, err error)
+ CiliumExternalWorkloadExpansion
+}
+
+// ciliumExternalWorkloads implements CiliumExternalWorkloadInterface
+type ciliumExternalWorkloads struct {
+ client rest.Interface
+}
+
+// newCiliumExternalWorkloads returns a CiliumExternalWorkloads
+func newCiliumExternalWorkloads(c *CiliumV2Client) *ciliumExternalWorkloads {
+ return &ciliumExternalWorkloads{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the ciliumExternalWorkload, and returns the corresponding ciliumExternalWorkload object, and an error if there is any.
+func (c *ciliumExternalWorkloads) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumExternalWorkload, err error) {
+ result = &v2.CiliumExternalWorkload{}
+ err = c.client.Get().
+ Resource("ciliumexternalworkloads").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of CiliumExternalWorkloads that match those selectors.
+func (c *ciliumExternalWorkloads) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumExternalWorkloadList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v2.CiliumExternalWorkloadList{}
+ err = c.client.Get().
+ Resource("ciliumexternalworkloads").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested ciliumExternalWorkloads.
+func (c *ciliumExternalWorkloads) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("ciliumexternalworkloads").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a ciliumExternalWorkload and creates it. Returns the server's representation of the ciliumExternalWorkload, and an error, if there is any.
+func (c *ciliumExternalWorkloads) Create(ctx context.Context, ciliumExternalWorkload *v2.CiliumExternalWorkload, opts v1.CreateOptions) (result *v2.CiliumExternalWorkload, err error) {
+ result = &v2.CiliumExternalWorkload{}
+ err = c.client.Post().
+ Resource("ciliumexternalworkloads").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumExternalWorkload).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a ciliumExternalWorkload and updates it. Returns the server's representation of the ciliumExternalWorkload, and an error, if there is any.
+func (c *ciliumExternalWorkloads) Update(ctx context.Context, ciliumExternalWorkload *v2.CiliumExternalWorkload, opts v1.UpdateOptions) (result *v2.CiliumExternalWorkload, err error) {
+ result = &v2.CiliumExternalWorkload{}
+ err = c.client.Put().
+ Resource("ciliumexternalworkloads").
+ Name(ciliumExternalWorkload.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumExternalWorkload).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *ciliumExternalWorkloads) UpdateStatus(ctx context.Context, ciliumExternalWorkload *v2.CiliumExternalWorkload, opts v1.UpdateOptions) (result *v2.CiliumExternalWorkload, err error) {
+ result = &v2.CiliumExternalWorkload{}
+ err = c.client.Put().
+ Resource("ciliumexternalworkloads").
+ Name(ciliumExternalWorkload.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumExternalWorkload).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the ciliumExternalWorkload and deletes it. Returns an error if one occurs.
+func (c *ciliumExternalWorkloads) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("ciliumexternalworkloads").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *ciliumExternalWorkloads) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("ciliumexternalworkloads").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched ciliumExternalWorkload.
+func (c *ciliumExternalWorkloads) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumExternalWorkload, err error) {
+ result = &v2.CiliumExternalWorkload{}
+ err = c.client.Patch(pt).
+ Resource("ciliumexternalworkloads").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumidentity.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumidentity.go
new file mode 100644
index 000000000..c23f01726
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumidentity.go
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v2
+
+import (
+ "context"
+ "time"
+
+ v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
+ scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// CiliumIdentitiesGetter has a method to return a CiliumIdentityInterface.
+// A group's client should implement this interface.
+type CiliumIdentitiesGetter interface {
+ CiliumIdentities() CiliumIdentityInterface
+}
+
+// CiliumIdentityInterface has methods to work with CiliumIdentity resources.
+type CiliumIdentityInterface interface {
+ Create(ctx context.Context, ciliumIdentity *v2.CiliumIdentity, opts v1.CreateOptions) (*v2.CiliumIdentity, error)
+ Update(ctx context.Context, ciliumIdentity *v2.CiliumIdentity, opts v1.UpdateOptions) (*v2.CiliumIdentity, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v2.CiliumIdentity, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v2.CiliumIdentityList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumIdentity, err error)
+ CiliumIdentityExpansion
+}
+
+// ciliumIdentities implements CiliumIdentityInterface
+type ciliumIdentities struct {
+ client rest.Interface
+}
+
+// newCiliumIdentities returns a CiliumIdentities
+func newCiliumIdentities(c *CiliumV2Client) *ciliumIdentities {
+ return &ciliumIdentities{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the ciliumIdentity, and returns the corresponding ciliumIdentity object, and an error if there is any.
+func (c *ciliumIdentities) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumIdentity, err error) {
+ result = &v2.CiliumIdentity{}
+ err = c.client.Get().
+ Resource("ciliumidentities").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of CiliumIdentities that match those selectors.
+func (c *ciliumIdentities) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumIdentityList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v2.CiliumIdentityList{}
+ err = c.client.Get().
+ Resource("ciliumidentities").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested ciliumIdentities.
+func (c *ciliumIdentities) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("ciliumidentities").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a ciliumIdentity and creates it. Returns the server's representation of the ciliumIdentity, and an error, if there is any.
+func (c *ciliumIdentities) Create(ctx context.Context, ciliumIdentity *v2.CiliumIdentity, opts v1.CreateOptions) (result *v2.CiliumIdentity, err error) {
+ result = &v2.CiliumIdentity{}
+ err = c.client.Post().
+ Resource("ciliumidentities").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumIdentity).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a ciliumIdentity and updates it. Returns the server's representation of the ciliumIdentity, and an error, if there is any.
+func (c *ciliumIdentities) Update(ctx context.Context, ciliumIdentity *v2.CiliumIdentity, opts v1.UpdateOptions) (result *v2.CiliumIdentity, err error) {
+ result = &v2.CiliumIdentity{}
+ err = c.client.Put().
+ Resource("ciliumidentities").
+ Name(ciliumIdentity.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumIdentity).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the ciliumIdentity and deletes it. Returns an error if one occurs.
+func (c *ciliumIdentities) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("ciliumidentities").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *ciliumIdentities) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("ciliumidentities").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched ciliumIdentity.
+func (c *ciliumIdentities) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumIdentity, err error) {
+ result = &v2.CiliumIdentity{}
+ err = c.client.Patch(pt).
+ Resource("ciliumidentities").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumlocalredirectpolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumlocalredirectpolicy.go
new file mode 100644
index 000000000..9c839eb94
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumlocalredirectpolicy.go
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v2
+
+import (
+ "context"
+ "time"
+
+ v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
+ scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// CiliumLocalRedirectPoliciesGetter has a method to return a CiliumLocalRedirectPolicyInterface.
+// A group's client should implement this interface.
+type CiliumLocalRedirectPoliciesGetter interface {
+ CiliumLocalRedirectPolicies(namespace string) CiliumLocalRedirectPolicyInterface
+}
+
+// CiliumLocalRedirectPolicyInterface has methods to work with CiliumLocalRedirectPolicy resources.
+type CiliumLocalRedirectPolicyInterface interface {
+ Create(ctx context.Context, ciliumLocalRedirectPolicy *v2.CiliumLocalRedirectPolicy, opts v1.CreateOptions) (*v2.CiliumLocalRedirectPolicy, error)
+ Update(ctx context.Context, ciliumLocalRedirectPolicy *v2.CiliumLocalRedirectPolicy, opts v1.UpdateOptions) (*v2.CiliumLocalRedirectPolicy, error)
+ UpdateStatus(ctx context.Context, ciliumLocalRedirectPolicy *v2.CiliumLocalRedirectPolicy, opts v1.UpdateOptions) (*v2.CiliumLocalRedirectPolicy, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v2.CiliumLocalRedirectPolicy, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v2.CiliumLocalRedirectPolicyList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumLocalRedirectPolicy, err error)
+ CiliumLocalRedirectPolicyExpansion
+}
+
+// ciliumLocalRedirectPolicies implements CiliumLocalRedirectPolicyInterface
+type ciliumLocalRedirectPolicies struct {
+ client rest.Interface
+ ns string
+}
+
+// newCiliumLocalRedirectPolicies returns a CiliumLocalRedirectPolicies
+func newCiliumLocalRedirectPolicies(c *CiliumV2Client, namespace string) *ciliumLocalRedirectPolicies {
+ return &ciliumLocalRedirectPolicies{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the ciliumLocalRedirectPolicy, and returns the corresponding ciliumLocalRedirectPolicy object, and an error if there is any.
+func (c *ciliumLocalRedirectPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumLocalRedirectPolicy, err error) {
+ result = &v2.CiliumLocalRedirectPolicy{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("ciliumlocalredirectpolicies").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of CiliumLocalRedirectPolicies that match those selectors.
+func (c *ciliumLocalRedirectPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumLocalRedirectPolicyList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v2.CiliumLocalRedirectPolicyList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("ciliumlocalredirectpolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested ciliumLocalRedirectPolicies.
+func (c *ciliumLocalRedirectPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("ciliumlocalredirectpolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a ciliumLocalRedirectPolicy and creates it. Returns the server's representation of the ciliumLocalRedirectPolicy, and an error, if there is any.
+func (c *ciliumLocalRedirectPolicies) Create(ctx context.Context, ciliumLocalRedirectPolicy *v2.CiliumLocalRedirectPolicy, opts v1.CreateOptions) (result *v2.CiliumLocalRedirectPolicy, err error) {
+ result = &v2.CiliumLocalRedirectPolicy{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("ciliumlocalredirectpolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumLocalRedirectPolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a ciliumLocalRedirectPolicy and updates it. Returns the server's representation of the ciliumLocalRedirectPolicy, and an error, if there is any.
+func (c *ciliumLocalRedirectPolicies) Update(ctx context.Context, ciliumLocalRedirectPolicy *v2.CiliumLocalRedirectPolicy, opts v1.UpdateOptions) (result *v2.CiliumLocalRedirectPolicy, err error) {
+ result = &v2.CiliumLocalRedirectPolicy{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("ciliumlocalredirectpolicies").
+ Name(ciliumLocalRedirectPolicy.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumLocalRedirectPolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *ciliumLocalRedirectPolicies) UpdateStatus(ctx context.Context, ciliumLocalRedirectPolicy *v2.CiliumLocalRedirectPolicy, opts v1.UpdateOptions) (result *v2.CiliumLocalRedirectPolicy, err error) {
+ result = &v2.CiliumLocalRedirectPolicy{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("ciliumlocalredirectpolicies").
+ Name(ciliumLocalRedirectPolicy.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumLocalRedirectPolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the ciliumLocalRedirectPolicy and deletes it. Returns an error if one occurs.
+func (c *ciliumLocalRedirectPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("ciliumlocalredirectpolicies").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *ciliumLocalRedirectPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("ciliumlocalredirectpolicies").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched ciliumLocalRedirectPolicy.
+func (c *ciliumLocalRedirectPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumLocalRedirectPolicy, err error) {
+ result = &v2.CiliumLocalRedirectPolicy{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("ciliumlocalredirectpolicies").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumnetworkpolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumnetworkpolicy.go
new file mode 100644
index 000000000..f5d5ae60a
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumnetworkpolicy.go
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v2
+
+import (
+ "context"
+ "time"
+
+ v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
+ scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// CiliumNetworkPoliciesGetter has a method to return a CiliumNetworkPolicyInterface.
+// A group's client should implement this interface.
+type CiliumNetworkPoliciesGetter interface {
+ CiliumNetworkPolicies(namespace string) CiliumNetworkPolicyInterface
+}
+
+// CiliumNetworkPolicyInterface has methods to work with CiliumNetworkPolicy resources.
+type CiliumNetworkPolicyInterface interface {
+ Create(ctx context.Context, ciliumNetworkPolicy *v2.CiliumNetworkPolicy, opts v1.CreateOptions) (*v2.CiliumNetworkPolicy, error)
+ Update(ctx context.Context, ciliumNetworkPolicy *v2.CiliumNetworkPolicy, opts v1.UpdateOptions) (*v2.CiliumNetworkPolicy, error)
+ UpdateStatus(ctx context.Context, ciliumNetworkPolicy *v2.CiliumNetworkPolicy, opts v1.UpdateOptions) (*v2.CiliumNetworkPolicy, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v2.CiliumNetworkPolicy, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v2.CiliumNetworkPolicyList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumNetworkPolicy, err error)
+ CiliumNetworkPolicyExpansion
+}
+
+// ciliumNetworkPolicies implements CiliumNetworkPolicyInterface
+type ciliumNetworkPolicies struct {
+ client rest.Interface
+ ns string
+}
+
+// newCiliumNetworkPolicies returns a CiliumNetworkPolicies
+func newCiliumNetworkPolicies(c *CiliumV2Client, namespace string) *ciliumNetworkPolicies {
+ return &ciliumNetworkPolicies{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the ciliumNetworkPolicy, and returns the corresponding ciliumNetworkPolicy object, and an error if there is any.
+func (c *ciliumNetworkPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumNetworkPolicy, err error) {
+ result = &v2.CiliumNetworkPolicy{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("ciliumnetworkpolicies").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of CiliumNetworkPolicies that match those selectors.
+func (c *ciliumNetworkPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumNetworkPolicyList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v2.CiliumNetworkPolicyList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("ciliumnetworkpolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested ciliumNetworkPolicies.
+func (c *ciliumNetworkPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("ciliumnetworkpolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a ciliumNetworkPolicy and creates it. Returns the server's representation of the ciliumNetworkPolicy, and an error, if there is any.
+func (c *ciliumNetworkPolicies) Create(ctx context.Context, ciliumNetworkPolicy *v2.CiliumNetworkPolicy, opts v1.CreateOptions) (result *v2.CiliumNetworkPolicy, err error) {
+ result = &v2.CiliumNetworkPolicy{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("ciliumnetworkpolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumNetworkPolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a ciliumNetworkPolicy and updates it. Returns the server's representation of the ciliumNetworkPolicy, and an error, if there is any.
+func (c *ciliumNetworkPolicies) Update(ctx context.Context, ciliumNetworkPolicy *v2.CiliumNetworkPolicy, opts v1.UpdateOptions) (result *v2.CiliumNetworkPolicy, err error) {
+ result = &v2.CiliumNetworkPolicy{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("ciliumnetworkpolicies").
+ Name(ciliumNetworkPolicy.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumNetworkPolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *ciliumNetworkPolicies) UpdateStatus(ctx context.Context, ciliumNetworkPolicy *v2.CiliumNetworkPolicy, opts v1.UpdateOptions) (result *v2.CiliumNetworkPolicy, err error) {
+ result = &v2.CiliumNetworkPolicy{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("ciliumnetworkpolicies").
+ Name(ciliumNetworkPolicy.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumNetworkPolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the ciliumNetworkPolicy and deletes it. Returns an error if one occurs.
+func (c *ciliumNetworkPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("ciliumnetworkpolicies").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *ciliumNetworkPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("ciliumnetworkpolicies").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched ciliumNetworkPolicy.
+func (c *ciliumNetworkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumNetworkPolicy, err error) {
+ result = &v2.CiliumNetworkPolicy{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("ciliumnetworkpolicies").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumnode.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumnode.go
new file mode 100644
index 000000000..9cc2e5555
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumnode.go
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v2
+
+import (
+ "context"
+ "time"
+
+ v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
+ scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// CiliumNodesGetter has a method to return a CiliumNodeInterface.
+// A group's client should implement this interface.
+type CiliumNodesGetter interface {
+ CiliumNodes() CiliumNodeInterface
+}
+
+// CiliumNodeInterface has methods to work with CiliumNode resources.
+type CiliumNodeInterface interface {
+ Create(ctx context.Context, ciliumNode *v2.CiliumNode, opts v1.CreateOptions) (*v2.CiliumNode, error)
+ Update(ctx context.Context, ciliumNode *v2.CiliumNode, opts v1.UpdateOptions) (*v2.CiliumNode, error)
+ UpdateStatus(ctx context.Context, ciliumNode *v2.CiliumNode, opts v1.UpdateOptions) (*v2.CiliumNode, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v2.CiliumNode, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v2.CiliumNodeList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumNode, err error)
+ CiliumNodeExpansion
+}
+
+// ciliumNodes implements CiliumNodeInterface
+type ciliumNodes struct {
+ client rest.Interface
+}
+
+// newCiliumNodes returns a CiliumNodes
+func newCiliumNodes(c *CiliumV2Client) *ciliumNodes {
+ return &ciliumNodes{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the ciliumNode, and returns the corresponding ciliumNode object, and an error if there is any.
+func (c *ciliumNodes) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumNode, err error) {
+ result = &v2.CiliumNode{}
+ err = c.client.Get().
+ Resource("ciliumnodes").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of CiliumNodes that match those selectors.
+func (c *ciliumNodes) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumNodeList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v2.CiliumNodeList{}
+ err = c.client.Get().
+ Resource("ciliumnodes").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested ciliumNodes.
+func (c *ciliumNodes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("ciliumnodes").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a ciliumNode and creates it. Returns the server's representation of the ciliumNode, and an error, if there is any.
+func (c *ciliumNodes) Create(ctx context.Context, ciliumNode *v2.CiliumNode, opts v1.CreateOptions) (result *v2.CiliumNode, err error) {
+ result = &v2.CiliumNode{}
+ err = c.client.Post().
+ Resource("ciliumnodes").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumNode).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a ciliumNode and updates it. Returns the server's representation of the ciliumNode, and an error, if there is any.
+func (c *ciliumNodes) Update(ctx context.Context, ciliumNode *v2.CiliumNode, opts v1.UpdateOptions) (result *v2.CiliumNode, err error) {
+ result = &v2.CiliumNode{}
+ err = c.client.Put().
+ Resource("ciliumnodes").
+ Name(ciliumNode.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumNode).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *ciliumNodes) UpdateStatus(ctx context.Context, ciliumNode *v2.CiliumNode, opts v1.UpdateOptions) (result *v2.CiliumNode, err error) {
+ result = &v2.CiliumNode{}
+ err = c.client.Put().
+ Resource("ciliumnodes").
+ Name(ciliumNode.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumNode).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the ciliumNode and deletes it. Returns an error if one occurs.
+func (c *ciliumNodes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("ciliumnodes").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *ciliumNodes) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("ciliumnodes").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched ciliumNode.
+func (c *ciliumNodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumNode, err error) {
+ result = &v2.CiliumNode{}
+ err = c.client.Patch(pt).
+ Resource("ciliumnodes").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/doc.go
new file mode 100644
index 000000000..b3757ef24
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/doc.go
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v2
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/doc.go
new file mode 100644
index 000000000..57bd090ef
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/doc.go
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_cilium.io_client.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_cilium.io_client.go
new file mode 100644
index 000000000..9ed059f64
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_cilium.io_client.go
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v2 "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeCiliumV2 struct {
+ *testing.Fake
+}
+
+func (c *FakeCiliumV2) CiliumClusterwideEnvoyConfigs() v2.CiliumClusterwideEnvoyConfigInterface {
+ return &FakeCiliumClusterwideEnvoyConfigs{c}
+}
+
+func (c *FakeCiliumV2) CiliumClusterwideNetworkPolicies() v2.CiliumClusterwideNetworkPolicyInterface {
+ return &FakeCiliumClusterwideNetworkPolicies{c}
+}
+
+func (c *FakeCiliumV2) CiliumEgressGatewayPolicies() v2.CiliumEgressGatewayPolicyInterface {
+ return &FakeCiliumEgressGatewayPolicies{c}
+}
+
+func (c *FakeCiliumV2) CiliumEndpoints(namespace string) v2.CiliumEndpointInterface {
+ return &FakeCiliumEndpoints{c, namespace}
+}
+
+func (c *FakeCiliumV2) CiliumEnvoyConfigs(namespace string) v2.CiliumEnvoyConfigInterface {
+ return &FakeCiliumEnvoyConfigs{c, namespace}
+}
+
+func (c *FakeCiliumV2) CiliumExternalWorkloads() v2.CiliumExternalWorkloadInterface {
+ return &FakeCiliumExternalWorkloads{c}
+}
+
+func (c *FakeCiliumV2) CiliumIdentities() v2.CiliumIdentityInterface {
+ return &FakeCiliumIdentities{c}
+}
+
+func (c *FakeCiliumV2) CiliumLocalRedirectPolicies(namespace string) v2.CiliumLocalRedirectPolicyInterface {
+ return &FakeCiliumLocalRedirectPolicies{c, namespace}
+}
+
+func (c *FakeCiliumV2) CiliumNetworkPolicies(namespace string) v2.CiliumNetworkPolicyInterface {
+ return &FakeCiliumNetworkPolicies{c, namespace}
+}
+
+func (c *FakeCiliumV2) CiliumNodes() v2.CiliumNodeInterface {
+ return &FakeCiliumNodes{c}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeCiliumV2) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumclusterwideenvoyconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumclusterwideenvoyconfig.go
new file mode 100644
index 000000000..ecabad97a
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumclusterwideenvoyconfig.go
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeCiliumClusterwideEnvoyConfigs implements CiliumClusterwideEnvoyConfigInterface
+type FakeCiliumClusterwideEnvoyConfigs struct {
+ Fake *FakeCiliumV2
+}
+
+var ciliumclusterwideenvoyconfigsResource = v2.SchemeGroupVersion.WithResource("ciliumclusterwideenvoyconfigs")
+
+var ciliumclusterwideenvoyconfigsKind = v2.SchemeGroupVersion.WithKind("CiliumClusterwideEnvoyConfig")
+
+// Get takes name of the ciliumClusterwideEnvoyConfig, and returns the corresponding ciliumClusterwideEnvoyConfig object, and an error if there is any.
+func (c *FakeCiliumClusterwideEnvoyConfigs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumClusterwideEnvoyConfig, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(ciliumclusterwideenvoyconfigsResource, name), &v2.CiliumClusterwideEnvoyConfig{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumClusterwideEnvoyConfig), err
+}
+
+// List takes label and field selectors, and returns the list of CiliumClusterwideEnvoyConfigs that match those selectors.
+func (c *FakeCiliumClusterwideEnvoyConfigs) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumClusterwideEnvoyConfigList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(ciliumclusterwideenvoyconfigsResource, ciliumclusterwideenvoyconfigsKind, opts), &v2.CiliumClusterwideEnvoyConfigList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v2.CiliumClusterwideEnvoyConfigList{ListMeta: obj.(*v2.CiliumClusterwideEnvoyConfigList).ListMeta}
+ for _, item := range obj.(*v2.CiliumClusterwideEnvoyConfigList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested ciliumClusterwideEnvoyConfigs.
+func (c *FakeCiliumClusterwideEnvoyConfigs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(ciliumclusterwideenvoyconfigsResource, opts))
+}
+
+// Create takes the representation of a ciliumClusterwideEnvoyConfig and creates it. Returns the server's representation of the ciliumClusterwideEnvoyConfig, and an error, if there is any.
+func (c *FakeCiliumClusterwideEnvoyConfigs) Create(ctx context.Context, ciliumClusterwideEnvoyConfig *v2.CiliumClusterwideEnvoyConfig, opts v1.CreateOptions) (result *v2.CiliumClusterwideEnvoyConfig, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(ciliumclusterwideenvoyconfigsResource, ciliumClusterwideEnvoyConfig), &v2.CiliumClusterwideEnvoyConfig{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumClusterwideEnvoyConfig), err
+}
+
+// Update takes the representation of a ciliumClusterwideEnvoyConfig and updates it. Returns the server's representation of the ciliumClusterwideEnvoyConfig, and an error, if there is any.
+func (c *FakeCiliumClusterwideEnvoyConfigs) Update(ctx context.Context, ciliumClusterwideEnvoyConfig *v2.CiliumClusterwideEnvoyConfig, opts v1.UpdateOptions) (result *v2.CiliumClusterwideEnvoyConfig, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(ciliumclusterwideenvoyconfigsResource, ciliumClusterwideEnvoyConfig), &v2.CiliumClusterwideEnvoyConfig{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumClusterwideEnvoyConfig), err
+}
+
+// Delete takes name of the ciliumClusterwideEnvoyConfig and deletes it. Returns an error if one occurs.
+func (c *FakeCiliumClusterwideEnvoyConfigs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteActionWithOptions(ciliumclusterwideenvoyconfigsResource, name, opts), &v2.CiliumClusterwideEnvoyConfig{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeCiliumClusterwideEnvoyConfigs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(ciliumclusterwideenvoyconfigsResource, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v2.CiliumClusterwideEnvoyConfigList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched ciliumClusterwideEnvoyConfig.
+func (c *FakeCiliumClusterwideEnvoyConfigs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumClusterwideEnvoyConfig, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(ciliumclusterwideenvoyconfigsResource, name, pt, data, subresources...), &v2.CiliumClusterwideEnvoyConfig{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumClusterwideEnvoyConfig), err
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumclusterwidenetworkpolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumclusterwidenetworkpolicy.go
new file mode 100644
index 000000000..d3d2be624
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumclusterwidenetworkpolicy.go
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeCiliumClusterwideNetworkPolicies implements CiliumClusterwideNetworkPolicyInterface
+type FakeCiliumClusterwideNetworkPolicies struct {
+ Fake *FakeCiliumV2
+}
+
+var ciliumclusterwidenetworkpoliciesResource = v2.SchemeGroupVersion.WithResource("ciliumclusterwidenetworkpolicies")
+
+var ciliumclusterwidenetworkpoliciesKind = v2.SchemeGroupVersion.WithKind("CiliumClusterwideNetworkPolicy")
+
+// Get takes name of the ciliumClusterwideNetworkPolicy, and returns the corresponding ciliumClusterwideNetworkPolicy object, and an error if there is any.
+func (c *FakeCiliumClusterwideNetworkPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumClusterwideNetworkPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(ciliumclusterwidenetworkpoliciesResource, name), &v2.CiliumClusterwideNetworkPolicy{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumClusterwideNetworkPolicy), err
+}
+
+// List takes label and field selectors, and returns the list of CiliumClusterwideNetworkPolicies that match those selectors.
+func (c *FakeCiliumClusterwideNetworkPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumClusterwideNetworkPolicyList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(ciliumclusterwidenetworkpoliciesResource, ciliumclusterwidenetworkpoliciesKind, opts), &v2.CiliumClusterwideNetworkPolicyList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v2.CiliumClusterwideNetworkPolicyList{ListMeta: obj.(*v2.CiliumClusterwideNetworkPolicyList).ListMeta}
+ for _, item := range obj.(*v2.CiliumClusterwideNetworkPolicyList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested ciliumClusterwideNetworkPolicies.
+func (c *FakeCiliumClusterwideNetworkPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(ciliumclusterwidenetworkpoliciesResource, opts))
+}
+
+// Create takes the representation of a ciliumClusterwideNetworkPolicy and creates it. Returns the server's representation of the ciliumClusterwideNetworkPolicy, and an error, if there is any.
+func (c *FakeCiliumClusterwideNetworkPolicies) Create(ctx context.Context, ciliumClusterwideNetworkPolicy *v2.CiliumClusterwideNetworkPolicy, opts v1.CreateOptions) (result *v2.CiliumClusterwideNetworkPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(ciliumclusterwidenetworkpoliciesResource, ciliumClusterwideNetworkPolicy), &v2.CiliumClusterwideNetworkPolicy{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumClusterwideNetworkPolicy), err
+}
+
+// Update takes the representation of a ciliumClusterwideNetworkPolicy and updates it. Returns the server's representation of the ciliumClusterwideNetworkPolicy, and an error, if there is any.
+func (c *FakeCiliumClusterwideNetworkPolicies) Update(ctx context.Context, ciliumClusterwideNetworkPolicy *v2.CiliumClusterwideNetworkPolicy, opts v1.UpdateOptions) (result *v2.CiliumClusterwideNetworkPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(ciliumclusterwidenetworkpoliciesResource, ciliumClusterwideNetworkPolicy), &v2.CiliumClusterwideNetworkPolicy{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumClusterwideNetworkPolicy), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeCiliumClusterwideNetworkPolicies) UpdateStatus(ctx context.Context, ciliumClusterwideNetworkPolicy *v2.CiliumClusterwideNetworkPolicy, opts v1.UpdateOptions) (*v2.CiliumClusterwideNetworkPolicy, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateSubresourceAction(ciliumclusterwidenetworkpoliciesResource, "status", ciliumClusterwideNetworkPolicy), &v2.CiliumClusterwideNetworkPolicy{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumClusterwideNetworkPolicy), err
+}
+
+// Delete takes name of the ciliumClusterwideNetworkPolicy and deletes it. Returns an error if one occurs.
+func (c *FakeCiliumClusterwideNetworkPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteActionWithOptions(ciliumclusterwidenetworkpoliciesResource, name, opts), &v2.CiliumClusterwideNetworkPolicy{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeCiliumClusterwideNetworkPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(ciliumclusterwidenetworkpoliciesResource, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v2.CiliumClusterwideNetworkPolicyList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched ciliumClusterwideNetworkPolicy.
+func (c *FakeCiliumClusterwideNetworkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumClusterwideNetworkPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(ciliumclusterwidenetworkpoliciesResource, name, pt, data, subresources...), &v2.CiliumClusterwideNetworkPolicy{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumClusterwideNetworkPolicy), err
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumegressgatewaypolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumegressgatewaypolicy.go
new file mode 100644
index 000000000..993ab79e2
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumegressgatewaypolicy.go
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeCiliumEgressGatewayPolicies implements CiliumEgressGatewayPolicyInterface
+type FakeCiliumEgressGatewayPolicies struct {
+ Fake *FakeCiliumV2
+}
+
+var ciliumegressgatewaypoliciesResource = v2.SchemeGroupVersion.WithResource("ciliumegressgatewaypolicies")
+
+var ciliumegressgatewaypoliciesKind = v2.SchemeGroupVersion.WithKind("CiliumEgressGatewayPolicy")
+
+// Get takes name of the ciliumEgressGatewayPolicy, and returns the corresponding ciliumEgressGatewayPolicy object, and an error if there is any.
+func (c *FakeCiliumEgressGatewayPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumEgressGatewayPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(ciliumegressgatewaypoliciesResource, name), &v2.CiliumEgressGatewayPolicy{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumEgressGatewayPolicy), err
+}
+
+// List takes label and field selectors, and returns the list of CiliumEgressGatewayPolicies that match those selectors.
+func (c *FakeCiliumEgressGatewayPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumEgressGatewayPolicyList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(ciliumegressgatewaypoliciesResource, ciliumegressgatewaypoliciesKind, opts), &v2.CiliumEgressGatewayPolicyList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v2.CiliumEgressGatewayPolicyList{ListMeta: obj.(*v2.CiliumEgressGatewayPolicyList).ListMeta}
+ for _, item := range obj.(*v2.CiliumEgressGatewayPolicyList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested ciliumEgressGatewayPolicies.
+func (c *FakeCiliumEgressGatewayPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(ciliumegressgatewaypoliciesResource, opts))
+}
+
+// Create takes the representation of a ciliumEgressGatewayPolicy and creates it. Returns the server's representation of the ciliumEgressGatewayPolicy, and an error, if there is any.
+func (c *FakeCiliumEgressGatewayPolicies) Create(ctx context.Context, ciliumEgressGatewayPolicy *v2.CiliumEgressGatewayPolicy, opts v1.CreateOptions) (result *v2.CiliumEgressGatewayPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(ciliumegressgatewaypoliciesResource, ciliumEgressGatewayPolicy), &v2.CiliumEgressGatewayPolicy{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumEgressGatewayPolicy), err
+}
+
+// Update takes the representation of a ciliumEgressGatewayPolicy and updates it. Returns the server's representation of the ciliumEgressGatewayPolicy, and an error, if there is any.
+func (c *FakeCiliumEgressGatewayPolicies) Update(ctx context.Context, ciliumEgressGatewayPolicy *v2.CiliumEgressGatewayPolicy, opts v1.UpdateOptions) (result *v2.CiliumEgressGatewayPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(ciliumegressgatewaypoliciesResource, ciliumEgressGatewayPolicy), &v2.CiliumEgressGatewayPolicy{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumEgressGatewayPolicy), err
+}
+
+// Delete takes name of the ciliumEgressGatewayPolicy and deletes it. Returns an error if one occurs.
+func (c *FakeCiliumEgressGatewayPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteActionWithOptions(ciliumegressgatewaypoliciesResource, name, opts), &v2.CiliumEgressGatewayPolicy{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeCiliumEgressGatewayPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(ciliumegressgatewaypoliciesResource, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v2.CiliumEgressGatewayPolicyList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched ciliumEgressGatewayPolicy.
+func (c *FakeCiliumEgressGatewayPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumEgressGatewayPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(ciliumegressgatewaypoliciesResource, name, pt, data, subresources...), &v2.CiliumEgressGatewayPolicy{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumEgressGatewayPolicy), err
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumendpoint.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumendpoint.go
new file mode 100644
index 000000000..5ec15c48b
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumendpoint.go
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeCiliumEndpoints implements CiliumEndpointInterface
+type FakeCiliumEndpoints struct {
+ Fake *FakeCiliumV2
+ ns string
+}
+
+var ciliumendpointsResource = v2.SchemeGroupVersion.WithResource("ciliumendpoints")
+
+var ciliumendpointsKind = v2.SchemeGroupVersion.WithKind("CiliumEndpoint")
+
+// Get takes name of the ciliumEndpoint, and returns the corresponding ciliumEndpoint object, and an error if there is any.
+func (c *FakeCiliumEndpoints) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumEndpoint, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(ciliumendpointsResource, c.ns, name), &v2.CiliumEndpoint{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumEndpoint), err
+}
+
+// List takes label and field selectors, and returns the list of CiliumEndpoints that match those selectors.
+func (c *FakeCiliumEndpoints) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumEndpointList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(ciliumendpointsResource, ciliumendpointsKind, c.ns, opts), &v2.CiliumEndpointList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v2.CiliumEndpointList{ListMeta: obj.(*v2.CiliumEndpointList).ListMeta}
+ for _, item := range obj.(*v2.CiliumEndpointList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested ciliumEndpoints.
+func (c *FakeCiliumEndpoints) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(ciliumendpointsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a ciliumEndpoint and creates it. Returns the server's representation of the ciliumEndpoint, and an error, if there is any.
+func (c *FakeCiliumEndpoints) Create(ctx context.Context, ciliumEndpoint *v2.CiliumEndpoint, opts v1.CreateOptions) (result *v2.CiliumEndpoint, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(ciliumendpointsResource, c.ns, ciliumEndpoint), &v2.CiliumEndpoint{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumEndpoint), err
+}
+
+// Update takes the representation of a ciliumEndpoint and updates it. Returns the server's representation of the ciliumEndpoint, and an error, if there is any.
+func (c *FakeCiliumEndpoints) Update(ctx context.Context, ciliumEndpoint *v2.CiliumEndpoint, opts v1.UpdateOptions) (result *v2.CiliumEndpoint, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(ciliumendpointsResource, c.ns, ciliumEndpoint), &v2.CiliumEndpoint{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumEndpoint), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeCiliumEndpoints) UpdateStatus(ctx context.Context, ciliumEndpoint *v2.CiliumEndpoint, opts v1.UpdateOptions) (*v2.CiliumEndpoint, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(ciliumendpointsResource, "status", c.ns, ciliumEndpoint), &v2.CiliumEndpoint{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumEndpoint), err
+}
+
+// Delete takes name of the ciliumEndpoint and deletes it. Returns an error if one occurs.
+func (c *FakeCiliumEndpoints) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteActionWithOptions(ciliumendpointsResource, c.ns, name, opts), &v2.CiliumEndpoint{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeCiliumEndpoints) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(ciliumendpointsResource, c.ns, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v2.CiliumEndpointList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched ciliumEndpoint.
+func (c *FakeCiliumEndpoints) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumEndpoint, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(ciliumendpointsResource, c.ns, name, pt, data, subresources...), &v2.CiliumEndpoint{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumEndpoint), err
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumenvoyconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumenvoyconfig.go
new file mode 100644
index 000000000..fa0592695
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumenvoyconfig.go
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeCiliumEnvoyConfigs implements CiliumEnvoyConfigInterface
+type FakeCiliumEnvoyConfigs struct {
+ Fake *FakeCiliumV2
+ ns string
+}
+
+var ciliumenvoyconfigsResource = v2.SchemeGroupVersion.WithResource("ciliumenvoyconfigs")
+
+var ciliumenvoyconfigsKind = v2.SchemeGroupVersion.WithKind("CiliumEnvoyConfig")
+
+// Get takes name of the ciliumEnvoyConfig, and returns the corresponding ciliumEnvoyConfig object, and an error if there is any.
+func (c *FakeCiliumEnvoyConfigs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumEnvoyConfig, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(ciliumenvoyconfigsResource, c.ns, name), &v2.CiliumEnvoyConfig{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumEnvoyConfig), err
+}
+
+// List takes label and field selectors, and returns the list of CiliumEnvoyConfigs that match those selectors.
+func (c *FakeCiliumEnvoyConfigs) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumEnvoyConfigList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(ciliumenvoyconfigsResource, ciliumenvoyconfigsKind, c.ns, opts), &v2.CiliumEnvoyConfigList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v2.CiliumEnvoyConfigList{ListMeta: obj.(*v2.CiliumEnvoyConfigList).ListMeta}
+ for _, item := range obj.(*v2.CiliumEnvoyConfigList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested ciliumEnvoyConfigs.
+func (c *FakeCiliumEnvoyConfigs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(ciliumenvoyconfigsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a ciliumEnvoyConfig and creates it. Returns the server's representation of the ciliumEnvoyConfig, and an error, if there is any.
+func (c *FakeCiliumEnvoyConfigs) Create(ctx context.Context, ciliumEnvoyConfig *v2.CiliumEnvoyConfig, opts v1.CreateOptions) (result *v2.CiliumEnvoyConfig, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(ciliumenvoyconfigsResource, c.ns, ciliumEnvoyConfig), &v2.CiliumEnvoyConfig{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumEnvoyConfig), err
+}
+
+// Update takes the representation of a ciliumEnvoyConfig and updates it. Returns the server's representation of the ciliumEnvoyConfig, and an error, if there is any.
+func (c *FakeCiliumEnvoyConfigs) Update(ctx context.Context, ciliumEnvoyConfig *v2.CiliumEnvoyConfig, opts v1.UpdateOptions) (result *v2.CiliumEnvoyConfig, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(ciliumenvoyconfigsResource, c.ns, ciliumEnvoyConfig), &v2.CiliumEnvoyConfig{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumEnvoyConfig), err
+}
+
+// Delete takes name of the ciliumEnvoyConfig and deletes it. Returns an error if one occurs.
+func (c *FakeCiliumEnvoyConfigs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteActionWithOptions(ciliumenvoyconfigsResource, c.ns, name, opts), &v2.CiliumEnvoyConfig{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeCiliumEnvoyConfigs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(ciliumenvoyconfigsResource, c.ns, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v2.CiliumEnvoyConfigList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched ciliumEnvoyConfig.
+func (c *FakeCiliumEnvoyConfigs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumEnvoyConfig, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(ciliumenvoyconfigsResource, c.ns, name, pt, data, subresources...), &v2.CiliumEnvoyConfig{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumEnvoyConfig), err
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumexternalworkload.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumexternalworkload.go
new file mode 100644
index 000000000..e947be5f1
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumexternalworkload.go
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeCiliumExternalWorkloads implements CiliumExternalWorkloadInterface
+type FakeCiliumExternalWorkloads struct {
+ Fake *FakeCiliumV2
+}
+
+var ciliumexternalworkloadsResource = v2.SchemeGroupVersion.WithResource("ciliumexternalworkloads")
+
+var ciliumexternalworkloadsKind = v2.SchemeGroupVersion.WithKind("CiliumExternalWorkload")
+
+// Get takes name of the ciliumExternalWorkload, and returns the corresponding ciliumExternalWorkload object, and an error if there is any.
+func (c *FakeCiliumExternalWorkloads) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumExternalWorkload, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(ciliumexternalworkloadsResource, name), &v2.CiliumExternalWorkload{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumExternalWorkload), err
+}
+
+// List takes label and field selectors, and returns the list of CiliumExternalWorkloads that match those selectors.
+func (c *FakeCiliumExternalWorkloads) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumExternalWorkloadList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(ciliumexternalworkloadsResource, ciliumexternalworkloadsKind, opts), &v2.CiliumExternalWorkloadList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v2.CiliumExternalWorkloadList{ListMeta: obj.(*v2.CiliumExternalWorkloadList).ListMeta}
+ for _, item := range obj.(*v2.CiliumExternalWorkloadList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested ciliumExternalWorkloads.
+func (c *FakeCiliumExternalWorkloads) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(ciliumexternalworkloadsResource, opts))
+}
+
+// Create takes the representation of a ciliumExternalWorkload and creates it. Returns the server's representation of the ciliumExternalWorkload, and an error, if there is any.
+func (c *FakeCiliumExternalWorkloads) Create(ctx context.Context, ciliumExternalWorkload *v2.CiliumExternalWorkload, opts v1.CreateOptions) (result *v2.CiliumExternalWorkload, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(ciliumexternalworkloadsResource, ciliumExternalWorkload), &v2.CiliumExternalWorkload{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumExternalWorkload), err
+}
+
+// Update takes the representation of a ciliumExternalWorkload and updates it. Returns the server's representation of the ciliumExternalWorkload, and an error, if there is any.
+func (c *FakeCiliumExternalWorkloads) Update(ctx context.Context, ciliumExternalWorkload *v2.CiliumExternalWorkload, opts v1.UpdateOptions) (result *v2.CiliumExternalWorkload, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(ciliumexternalworkloadsResource, ciliumExternalWorkload), &v2.CiliumExternalWorkload{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumExternalWorkload), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeCiliumExternalWorkloads) UpdateStatus(ctx context.Context, ciliumExternalWorkload *v2.CiliumExternalWorkload, opts v1.UpdateOptions) (*v2.CiliumExternalWorkload, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateSubresourceAction(ciliumexternalworkloadsResource, "status", ciliumExternalWorkload), &v2.CiliumExternalWorkload{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumExternalWorkload), err
+}
+
+// Delete takes name of the ciliumExternalWorkload and deletes it. Returns an error if one occurs.
+func (c *FakeCiliumExternalWorkloads) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteActionWithOptions(ciliumexternalworkloadsResource, name, opts), &v2.CiliumExternalWorkload{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeCiliumExternalWorkloads) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(ciliumexternalworkloadsResource, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v2.CiliumExternalWorkloadList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched ciliumExternalWorkload.
+func (c *FakeCiliumExternalWorkloads) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumExternalWorkload, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(ciliumexternalworkloadsResource, name, pt, data, subresources...), &v2.CiliumExternalWorkload{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumExternalWorkload), err
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumidentity.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumidentity.go
new file mode 100644
index 000000000..745193c7d
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumidentity.go
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeCiliumIdentities implements CiliumIdentityInterface
+type FakeCiliumIdentities struct {
+ Fake *FakeCiliumV2
+}
+
+var ciliumidentitiesResource = v2.SchemeGroupVersion.WithResource("ciliumidentities")
+
+var ciliumidentitiesKind = v2.SchemeGroupVersion.WithKind("CiliumIdentity")
+
+// Get takes name of the ciliumIdentity, and returns the corresponding ciliumIdentity object, and an error if there is any.
+func (c *FakeCiliumIdentities) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumIdentity, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(ciliumidentitiesResource, name), &v2.CiliumIdentity{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumIdentity), err
+}
+
+// List takes label and field selectors, and returns the list of CiliumIdentities that match those selectors.
+func (c *FakeCiliumIdentities) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumIdentityList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(ciliumidentitiesResource, ciliumidentitiesKind, opts), &v2.CiliumIdentityList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v2.CiliumIdentityList{ListMeta: obj.(*v2.CiliumIdentityList).ListMeta}
+ for _, item := range obj.(*v2.CiliumIdentityList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested ciliumIdentities.
+func (c *FakeCiliumIdentities) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(ciliumidentitiesResource, opts))
+}
+
+// Create takes the representation of a ciliumIdentity and creates it. Returns the server's representation of the ciliumIdentity, and an error, if there is any.
+func (c *FakeCiliumIdentities) Create(ctx context.Context, ciliumIdentity *v2.CiliumIdentity, opts v1.CreateOptions) (result *v2.CiliumIdentity, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(ciliumidentitiesResource, ciliumIdentity), &v2.CiliumIdentity{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumIdentity), err
+}
+
+// Update takes the representation of a ciliumIdentity and updates it. Returns the server's representation of the ciliumIdentity, and an error, if there is any.
+func (c *FakeCiliumIdentities) Update(ctx context.Context, ciliumIdentity *v2.CiliumIdentity, opts v1.UpdateOptions) (result *v2.CiliumIdentity, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(ciliumidentitiesResource, ciliumIdentity), &v2.CiliumIdentity{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumIdentity), err
+}
+
+// Delete takes name of the ciliumIdentity and deletes it. Returns an error if one occurs.
+func (c *FakeCiliumIdentities) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteActionWithOptions(ciliumidentitiesResource, name, opts), &v2.CiliumIdentity{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeCiliumIdentities) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(ciliumidentitiesResource, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v2.CiliumIdentityList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched ciliumIdentity.
+func (c *FakeCiliumIdentities) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumIdentity, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(ciliumidentitiesResource, name, pt, data, subresources...), &v2.CiliumIdentity{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumIdentity), err
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumlocalredirectpolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumlocalredirectpolicy.go
new file mode 100644
index 000000000..308309ee1
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumlocalredirectpolicy.go
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeCiliumLocalRedirectPolicies implements CiliumLocalRedirectPolicyInterface
+type FakeCiliumLocalRedirectPolicies struct {
+ Fake *FakeCiliumV2
+ ns string
+}
+
+var ciliumlocalredirectpoliciesResource = v2.SchemeGroupVersion.WithResource("ciliumlocalredirectpolicies")
+
+var ciliumlocalredirectpoliciesKind = v2.SchemeGroupVersion.WithKind("CiliumLocalRedirectPolicy")
+
+// Get takes name of the ciliumLocalRedirectPolicy, and returns the corresponding ciliumLocalRedirectPolicy object, and an error if there is any.
+func (c *FakeCiliumLocalRedirectPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumLocalRedirectPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(ciliumlocalredirectpoliciesResource, c.ns, name), &v2.CiliumLocalRedirectPolicy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumLocalRedirectPolicy), err
+}
+
+// List takes label and field selectors, and returns the list of CiliumLocalRedirectPolicies that match those selectors.
+func (c *FakeCiliumLocalRedirectPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumLocalRedirectPolicyList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(ciliumlocalredirectpoliciesResource, ciliumlocalredirectpoliciesKind, c.ns, opts), &v2.CiliumLocalRedirectPolicyList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v2.CiliumLocalRedirectPolicyList{ListMeta: obj.(*v2.CiliumLocalRedirectPolicyList).ListMeta}
+ for _, item := range obj.(*v2.CiliumLocalRedirectPolicyList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested ciliumLocalRedirectPolicies.
+func (c *FakeCiliumLocalRedirectPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(ciliumlocalredirectpoliciesResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a ciliumLocalRedirectPolicy and creates it. Returns the server's representation of the ciliumLocalRedirectPolicy, and an error, if there is any.
+func (c *FakeCiliumLocalRedirectPolicies) Create(ctx context.Context, ciliumLocalRedirectPolicy *v2.CiliumLocalRedirectPolicy, opts v1.CreateOptions) (result *v2.CiliumLocalRedirectPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(ciliumlocalredirectpoliciesResource, c.ns, ciliumLocalRedirectPolicy), &v2.CiliumLocalRedirectPolicy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumLocalRedirectPolicy), err
+}
+
+// Update takes the representation of a ciliumLocalRedirectPolicy and updates it. Returns the server's representation of the ciliumLocalRedirectPolicy, and an error, if there is any.
+func (c *FakeCiliumLocalRedirectPolicies) Update(ctx context.Context, ciliumLocalRedirectPolicy *v2.CiliumLocalRedirectPolicy, opts v1.UpdateOptions) (result *v2.CiliumLocalRedirectPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(ciliumlocalredirectpoliciesResource, c.ns, ciliumLocalRedirectPolicy), &v2.CiliumLocalRedirectPolicy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumLocalRedirectPolicy), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeCiliumLocalRedirectPolicies) UpdateStatus(ctx context.Context, ciliumLocalRedirectPolicy *v2.CiliumLocalRedirectPolicy, opts v1.UpdateOptions) (*v2.CiliumLocalRedirectPolicy, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(ciliumlocalredirectpoliciesResource, "status", c.ns, ciliumLocalRedirectPolicy), &v2.CiliumLocalRedirectPolicy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumLocalRedirectPolicy), err
+}
+
+// Delete takes name of the ciliumLocalRedirectPolicy and deletes it. Returns an error if one occurs.
+func (c *FakeCiliumLocalRedirectPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteActionWithOptions(ciliumlocalredirectpoliciesResource, c.ns, name, opts), &v2.CiliumLocalRedirectPolicy{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeCiliumLocalRedirectPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(ciliumlocalredirectpoliciesResource, c.ns, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v2.CiliumLocalRedirectPolicyList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched ciliumLocalRedirectPolicy.
+func (c *FakeCiliumLocalRedirectPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumLocalRedirectPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(ciliumlocalredirectpoliciesResource, c.ns, name, pt, data, subresources...), &v2.CiliumLocalRedirectPolicy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumLocalRedirectPolicy), err
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumnetworkpolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumnetworkpolicy.go
new file mode 100644
index 000000000..b40b9740e
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumnetworkpolicy.go
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeCiliumNetworkPolicies implements CiliumNetworkPolicyInterface
+type FakeCiliumNetworkPolicies struct {
+ Fake *FakeCiliumV2
+ ns string
+}
+
+var ciliumnetworkpoliciesResource = v2.SchemeGroupVersion.WithResource("ciliumnetworkpolicies")
+
+var ciliumnetworkpoliciesKind = v2.SchemeGroupVersion.WithKind("CiliumNetworkPolicy")
+
+// Get takes name of the ciliumNetworkPolicy, and returns the corresponding ciliumNetworkPolicy object, and an error if there is any.
+func (c *FakeCiliumNetworkPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumNetworkPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(ciliumnetworkpoliciesResource, c.ns, name), &v2.CiliumNetworkPolicy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumNetworkPolicy), err
+}
+
+// List takes label and field selectors, and returns the list of CiliumNetworkPolicies that match those selectors.
+func (c *FakeCiliumNetworkPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumNetworkPolicyList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(ciliumnetworkpoliciesResource, ciliumnetworkpoliciesKind, c.ns, opts), &v2.CiliumNetworkPolicyList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v2.CiliumNetworkPolicyList{ListMeta: obj.(*v2.CiliumNetworkPolicyList).ListMeta}
+ for _, item := range obj.(*v2.CiliumNetworkPolicyList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested ciliumNetworkPolicies.
+func (c *FakeCiliumNetworkPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(ciliumnetworkpoliciesResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a ciliumNetworkPolicy and creates it. Returns the server's representation of the ciliumNetworkPolicy, and an error, if there is any.
+func (c *FakeCiliumNetworkPolicies) Create(ctx context.Context, ciliumNetworkPolicy *v2.CiliumNetworkPolicy, opts v1.CreateOptions) (result *v2.CiliumNetworkPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(ciliumnetworkpoliciesResource, c.ns, ciliumNetworkPolicy), &v2.CiliumNetworkPolicy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumNetworkPolicy), err
+}
+
+// Update takes the representation of a ciliumNetworkPolicy and updates it. Returns the server's representation of the ciliumNetworkPolicy, and an error, if there is any.
+func (c *FakeCiliumNetworkPolicies) Update(ctx context.Context, ciliumNetworkPolicy *v2.CiliumNetworkPolicy, opts v1.UpdateOptions) (result *v2.CiliumNetworkPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(ciliumnetworkpoliciesResource, c.ns, ciliumNetworkPolicy), &v2.CiliumNetworkPolicy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumNetworkPolicy), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeCiliumNetworkPolicies) UpdateStatus(ctx context.Context, ciliumNetworkPolicy *v2.CiliumNetworkPolicy, opts v1.UpdateOptions) (*v2.CiliumNetworkPolicy, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(ciliumnetworkpoliciesResource, "status", c.ns, ciliumNetworkPolicy), &v2.CiliumNetworkPolicy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumNetworkPolicy), err
+}
+
+// Delete takes name of the ciliumNetworkPolicy and deletes it. Returns an error if one occurs.
+func (c *FakeCiliumNetworkPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteActionWithOptions(ciliumnetworkpoliciesResource, c.ns, name, opts), &v2.CiliumNetworkPolicy{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeCiliumNetworkPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(ciliumnetworkpoliciesResource, c.ns, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v2.CiliumNetworkPolicyList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched ciliumNetworkPolicy.
+func (c *FakeCiliumNetworkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumNetworkPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(ciliumnetworkpoliciesResource, c.ns, name, pt, data, subresources...), &v2.CiliumNetworkPolicy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumNetworkPolicy), err
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumnode.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumnode.go
new file mode 100644
index 000000000..73e91b1f9
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/fake/fake_ciliumnode.go
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeCiliumNodes implements CiliumNodeInterface
+type FakeCiliumNodes struct {
+ Fake *FakeCiliumV2
+}
+
+var ciliumnodesResource = v2.SchemeGroupVersion.WithResource("ciliumnodes")
+
+var ciliumnodesKind = v2.SchemeGroupVersion.WithKind("CiliumNode")
+
+// Get takes name of the ciliumNode, and returns the corresponding ciliumNode object, and an error if there is any.
+func (c *FakeCiliumNodes) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumNode, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(ciliumnodesResource, name), &v2.CiliumNode{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumNode), err
+}
+
+// List takes label and field selectors, and returns the list of CiliumNodes that match those selectors.
+func (c *FakeCiliumNodes) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumNodeList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(ciliumnodesResource, ciliumnodesKind, opts), &v2.CiliumNodeList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v2.CiliumNodeList{ListMeta: obj.(*v2.CiliumNodeList).ListMeta}
+ for _, item := range obj.(*v2.CiliumNodeList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested ciliumNodes.
+func (c *FakeCiliumNodes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(ciliumnodesResource, opts))
+}
+
+// Create takes the representation of a ciliumNode and creates it. Returns the server's representation of the ciliumNode, and an error, if there is any.
+func (c *FakeCiliumNodes) Create(ctx context.Context, ciliumNode *v2.CiliumNode, opts v1.CreateOptions) (result *v2.CiliumNode, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(ciliumnodesResource, ciliumNode), &v2.CiliumNode{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumNode), err
+}
+
+// Update takes the representation of a ciliumNode and updates it. Returns the server's representation of the ciliumNode, and an error, if there is any.
+func (c *FakeCiliumNodes) Update(ctx context.Context, ciliumNode *v2.CiliumNode, opts v1.UpdateOptions) (result *v2.CiliumNode, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(ciliumnodesResource, ciliumNode), &v2.CiliumNode{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumNode), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeCiliumNodes) UpdateStatus(ctx context.Context, ciliumNode *v2.CiliumNode, opts v1.UpdateOptions) (*v2.CiliumNode, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateSubresourceAction(ciliumnodesResource, "status", ciliumNode), &v2.CiliumNode{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumNode), err
+}
+
+// Delete takes name of the ciliumNode and deletes it. Returns an error if one occurs.
+func (c *FakeCiliumNodes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteActionWithOptions(ciliumnodesResource, name, opts), &v2.CiliumNode{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeCiliumNodes) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(ciliumnodesResource, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v2.CiliumNodeList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched ciliumNode.
+func (c *FakeCiliumNodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumNode, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(ciliumnodesResource, name, pt, data, subresources...), &v2.CiliumNode{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2.CiliumNode), err
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/generated_expansion.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/generated_expansion.go
new file mode 100644
index 000000000..15b434bac
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/generated_expansion.go
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v2
+
+type CiliumClusterwideEnvoyConfigExpansion interface{}
+
+type CiliumClusterwideNetworkPolicyExpansion interface{}
+
+type CiliumEgressGatewayPolicyExpansion interface{}
+
+type CiliumEndpointExpansion interface{}
+
+type CiliumEnvoyConfigExpansion interface{}
+
+type CiliumExternalWorkloadExpansion interface{}
+
+type CiliumIdentityExpansion interface{}
+
+type CiliumLocalRedirectPolicyExpansion interface{}
+
+type CiliumNetworkPolicyExpansion interface{}
+
+type CiliumNodeExpansion interface{}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/cilium.io_client.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/cilium.io_client.go
new file mode 100644
index 000000000..0e56f4591
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/cilium.io_client.go
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v2alpha1
+
+import (
+ "net/http"
+
+ v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
+ "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
+ rest "k8s.io/client-go/rest"
+)
+
+type CiliumV2alpha1Interface interface {
+ RESTClient() rest.Interface
+ CiliumBGPPeeringPoliciesGetter
+ CiliumCIDRGroupsGetter
+ CiliumEndpointSlicesGetter
+ CiliumL2AnnouncementPoliciesGetter
+ CiliumLoadBalancerIPPoolsGetter
+ CiliumNodeConfigsGetter
+ CiliumPodIPPoolsGetter
+}
+
+// CiliumV2alpha1Client is used to interact with features provided by the cilium.io group.
+type CiliumV2alpha1Client struct {
+ restClient rest.Interface
+}
+
+func (c *CiliumV2alpha1Client) CiliumBGPPeeringPolicies() CiliumBGPPeeringPolicyInterface {
+ return newCiliumBGPPeeringPolicies(c)
+}
+
+func (c *CiliumV2alpha1Client) CiliumCIDRGroups() CiliumCIDRGroupInterface {
+ return newCiliumCIDRGroups(c)
+}
+
+func (c *CiliumV2alpha1Client) CiliumEndpointSlices() CiliumEndpointSliceInterface {
+ return newCiliumEndpointSlices(c)
+}
+
+func (c *CiliumV2alpha1Client) CiliumL2AnnouncementPolicies() CiliumL2AnnouncementPolicyInterface {
+ return newCiliumL2AnnouncementPolicies(c)
+}
+
+func (c *CiliumV2alpha1Client) CiliumLoadBalancerIPPools() CiliumLoadBalancerIPPoolInterface {
+ return newCiliumLoadBalancerIPPools(c)
+}
+
+func (c *CiliumV2alpha1Client) CiliumNodeConfigs(namespace string) CiliumNodeConfigInterface {
+ return newCiliumNodeConfigs(c, namespace)
+}
+
+func (c *CiliumV2alpha1Client) CiliumPodIPPools() CiliumPodIPPoolInterface {
+ return newCiliumPodIPPools(c)
+}
+
+// NewForConfig creates a new CiliumV2alpha1Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
+func NewForConfig(c *rest.Config) (*CiliumV2alpha1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ httpClient, err := rest.HTTPClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new CiliumV2alpha1Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CiliumV2alpha1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := rest.RESTClientForConfigAndClient(&config, h)
+ if err != nil {
+ return nil, err
+ }
+ return &CiliumV2alpha1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new CiliumV2alpha1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *CiliumV2alpha1Client {
+ client, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+// New creates a new CiliumV2alpha1Client for the given RESTClient.
+func New(c rest.Interface) *CiliumV2alpha1Client {
+ return &CiliumV2alpha1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+ gv := v2alpha1.SchemeGroupVersion
+ config.GroupVersion = &gv
+ config.APIPath = "/apis"
+ config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *CiliumV2alpha1Client) RESTClient() rest.Interface {
+ if c == nil {
+ return nil
+ }
+ return c.restClient
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgppeeringpolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgppeeringpolicy.go
new file mode 100644
index 000000000..e19727c07
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgppeeringpolicy.go
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v2alpha1
+
+import (
+ "context"
+ "time"
+
+ v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
+ scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// CiliumBGPPeeringPoliciesGetter has a method to return a CiliumBGPPeeringPolicyInterface.
+// A group's client should implement this interface.
+type CiliumBGPPeeringPoliciesGetter interface {
+ CiliumBGPPeeringPolicies() CiliumBGPPeeringPolicyInterface
+}
+
+// CiliumBGPPeeringPolicyInterface has methods to work with CiliumBGPPeeringPolicy resources.
+type CiliumBGPPeeringPolicyInterface interface {
+ Create(ctx context.Context, ciliumBGPPeeringPolicy *v2alpha1.CiliumBGPPeeringPolicy, opts v1.CreateOptions) (*v2alpha1.CiliumBGPPeeringPolicy, error)
+ Update(ctx context.Context, ciliumBGPPeeringPolicy *v2alpha1.CiliumBGPPeeringPolicy, opts v1.UpdateOptions) (*v2alpha1.CiliumBGPPeeringPolicy, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v2alpha1.CiliumBGPPeeringPolicy, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v2alpha1.CiliumBGPPeeringPolicyList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumBGPPeeringPolicy, err error)
+ CiliumBGPPeeringPolicyExpansion
+}
+
+// ciliumBGPPeeringPolicies implements CiliumBGPPeeringPolicyInterface
+type ciliumBGPPeeringPolicies struct {
+ client rest.Interface
+}
+
+// newCiliumBGPPeeringPolicies returns a CiliumBGPPeeringPolicies
+func newCiliumBGPPeeringPolicies(c *CiliumV2alpha1Client) *ciliumBGPPeeringPolicies {
+ return &ciliumBGPPeeringPolicies{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the ciliumBGPPeeringPolicy, and returns the corresponding ciliumBGPPeeringPolicy object, and an error if there is any.
+func (c *ciliumBGPPeeringPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumBGPPeeringPolicy, err error) {
+ result = &v2alpha1.CiliumBGPPeeringPolicy{}
+ err = c.client.Get().
+ Resource("ciliumbgppeeringpolicies").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of CiliumBGPPeeringPolicies that match those selectors.
+func (c *ciliumBGPPeeringPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumBGPPeeringPolicyList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v2alpha1.CiliumBGPPeeringPolicyList{}
+ err = c.client.Get().
+ Resource("ciliumbgppeeringpolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested ciliumBGPPeeringPolicies.
+func (c *ciliumBGPPeeringPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("ciliumbgppeeringpolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a ciliumBGPPeeringPolicy and creates it. Returns the server's representation of the ciliumBGPPeeringPolicy, and an error, if there is any.
+func (c *ciliumBGPPeeringPolicies) Create(ctx context.Context, ciliumBGPPeeringPolicy *v2alpha1.CiliumBGPPeeringPolicy, opts v1.CreateOptions) (result *v2alpha1.CiliumBGPPeeringPolicy, err error) {
+ result = &v2alpha1.CiliumBGPPeeringPolicy{}
+ err = c.client.Post().
+ Resource("ciliumbgppeeringpolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumBGPPeeringPolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a ciliumBGPPeeringPolicy and updates it. Returns the server's representation of the ciliumBGPPeeringPolicy, and an error, if there is any.
+func (c *ciliumBGPPeeringPolicies) Update(ctx context.Context, ciliumBGPPeeringPolicy *v2alpha1.CiliumBGPPeeringPolicy, opts v1.UpdateOptions) (result *v2alpha1.CiliumBGPPeeringPolicy, err error) {
+ result = &v2alpha1.CiliumBGPPeeringPolicy{}
+ err = c.client.Put().
+ Resource("ciliumbgppeeringpolicies").
+ Name(ciliumBGPPeeringPolicy.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumBGPPeeringPolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the ciliumBGPPeeringPolicy and deletes it. Returns an error if one occurs.
+func (c *ciliumBGPPeeringPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("ciliumbgppeeringpolicies").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *ciliumBGPPeeringPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("ciliumbgppeeringpolicies").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched ciliumBGPPeeringPolicy.
+func (c *ciliumBGPPeeringPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumBGPPeeringPolicy, err error) {
+ result = &v2alpha1.CiliumBGPPeeringPolicy{}
+ err = c.client.Patch(pt).
+ Resource("ciliumbgppeeringpolicies").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumcidrgroup.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumcidrgroup.go
new file mode 100644
index 000000000..42f93365b
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumcidrgroup.go
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v2alpha1
+
+import (
+ "context"
+ "time"
+
+ v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
+ scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// CiliumCIDRGroupsGetter has a method to return a CiliumCIDRGroupInterface.
+// A group's client should implement this interface.
+type CiliumCIDRGroupsGetter interface {
+ CiliumCIDRGroups() CiliumCIDRGroupInterface
+}
+
+// CiliumCIDRGroupInterface has methods to work with CiliumCIDRGroup resources.
+type CiliumCIDRGroupInterface interface {
+ Create(ctx context.Context, ciliumCIDRGroup *v2alpha1.CiliumCIDRGroup, opts v1.CreateOptions) (*v2alpha1.CiliumCIDRGroup, error)
+ Update(ctx context.Context, ciliumCIDRGroup *v2alpha1.CiliumCIDRGroup, opts v1.UpdateOptions) (*v2alpha1.CiliumCIDRGroup, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v2alpha1.CiliumCIDRGroup, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v2alpha1.CiliumCIDRGroupList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumCIDRGroup, err error)
+ CiliumCIDRGroupExpansion
+}
+
+// ciliumCIDRGroups implements CiliumCIDRGroupInterface
+type ciliumCIDRGroups struct {
+ client rest.Interface
+}
+
+// newCiliumCIDRGroups returns a CiliumCIDRGroups
+func newCiliumCIDRGroups(c *CiliumV2alpha1Client) *ciliumCIDRGroups {
+ return &ciliumCIDRGroups{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the ciliumCIDRGroup, and returns the corresponding ciliumCIDRGroup object, and an error if there is any.
+func (c *ciliumCIDRGroups) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumCIDRGroup, err error) {
+ result = &v2alpha1.CiliumCIDRGroup{}
+ err = c.client.Get().
+ Resource("ciliumcidrgroups").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of CiliumCIDRGroups that match those selectors.
+func (c *ciliumCIDRGroups) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumCIDRGroupList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v2alpha1.CiliumCIDRGroupList{}
+ err = c.client.Get().
+ Resource("ciliumcidrgroups").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested ciliumCIDRGroups.
+func (c *ciliumCIDRGroups) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("ciliumcidrgroups").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a ciliumCIDRGroup and creates it. Returns the server's representation of the ciliumCIDRGroup, and an error, if there is any.
+func (c *ciliumCIDRGroups) Create(ctx context.Context, ciliumCIDRGroup *v2alpha1.CiliumCIDRGroup, opts v1.CreateOptions) (result *v2alpha1.CiliumCIDRGroup, err error) {
+ result = &v2alpha1.CiliumCIDRGroup{}
+ err = c.client.Post().
+ Resource("ciliumcidrgroups").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumCIDRGroup).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a ciliumCIDRGroup and updates it. Returns the server's representation of the ciliumCIDRGroup, and an error, if there is any.
+func (c *ciliumCIDRGroups) Update(ctx context.Context, ciliumCIDRGroup *v2alpha1.CiliumCIDRGroup, opts v1.UpdateOptions) (result *v2alpha1.CiliumCIDRGroup, err error) {
+ result = &v2alpha1.CiliumCIDRGroup{}
+ err = c.client.Put().
+ Resource("ciliumcidrgroups").
+ Name(ciliumCIDRGroup.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumCIDRGroup).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the ciliumCIDRGroup and deletes it. Returns an error if one occurs.
+func (c *ciliumCIDRGroups) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("ciliumcidrgroups").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *ciliumCIDRGroups) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("ciliumcidrgroups").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched ciliumCIDRGroup.
+func (c *ciliumCIDRGroups) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumCIDRGroup, err error) {
+ result = &v2alpha1.CiliumCIDRGroup{}
+ err = c.client.Patch(pt).
+ Resource("ciliumcidrgroups").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumendpointslice.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumendpointslice.go
new file mode 100644
index 000000000..55cdaf11b
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumendpointslice.go
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v2alpha1
+
+import (
+ "context"
+ "time"
+
+ v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
+ scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// CiliumEndpointSlicesGetter has a method to return a CiliumEndpointSliceInterface.
+// A group's client should implement this interface.
+type CiliumEndpointSlicesGetter interface {
+ CiliumEndpointSlices() CiliumEndpointSliceInterface
+}
+
+// CiliumEndpointSliceInterface has methods to work with CiliumEndpointSlice resources.
+type CiliumEndpointSliceInterface interface {
+ Create(ctx context.Context, ciliumEndpointSlice *v2alpha1.CiliumEndpointSlice, opts v1.CreateOptions) (*v2alpha1.CiliumEndpointSlice, error)
+ Update(ctx context.Context, ciliumEndpointSlice *v2alpha1.CiliumEndpointSlice, opts v1.UpdateOptions) (*v2alpha1.CiliumEndpointSlice, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v2alpha1.CiliumEndpointSlice, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v2alpha1.CiliumEndpointSliceList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumEndpointSlice, err error)
+ CiliumEndpointSliceExpansion
+}
+
+// ciliumEndpointSlices implements CiliumEndpointSliceInterface
+type ciliumEndpointSlices struct {
+ client rest.Interface
+}
+
+// newCiliumEndpointSlices returns a CiliumEndpointSlices
+func newCiliumEndpointSlices(c *CiliumV2alpha1Client) *ciliumEndpointSlices {
+ return &ciliumEndpointSlices{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the ciliumEndpointSlice, and returns the corresponding ciliumEndpointSlice object, and an error if there is any.
+func (c *ciliumEndpointSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumEndpointSlice, err error) {
+ result = &v2alpha1.CiliumEndpointSlice{}
+ err = c.client.Get().
+ Resource("ciliumendpointslices").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of CiliumEndpointSlices that match those selectors.
+func (c *ciliumEndpointSlices) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumEndpointSliceList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v2alpha1.CiliumEndpointSliceList{}
+ err = c.client.Get().
+ Resource("ciliumendpointslices").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested ciliumEndpointSlices.
+func (c *ciliumEndpointSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("ciliumendpointslices").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a ciliumEndpointSlice and creates it. Returns the server's representation of the ciliumEndpointSlice, and an error, if there is any.
+func (c *ciliumEndpointSlices) Create(ctx context.Context, ciliumEndpointSlice *v2alpha1.CiliumEndpointSlice, opts v1.CreateOptions) (result *v2alpha1.CiliumEndpointSlice, err error) {
+ result = &v2alpha1.CiliumEndpointSlice{}
+ err = c.client.Post().
+ Resource("ciliumendpointslices").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumEndpointSlice).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a ciliumEndpointSlice and updates it. Returns the server's representation of the ciliumEndpointSlice, and an error, if there is any.
+func (c *ciliumEndpointSlices) Update(ctx context.Context, ciliumEndpointSlice *v2alpha1.CiliumEndpointSlice, opts v1.UpdateOptions) (result *v2alpha1.CiliumEndpointSlice, err error) {
+ result = &v2alpha1.CiliumEndpointSlice{}
+ err = c.client.Put().
+ Resource("ciliumendpointslices").
+ Name(ciliumEndpointSlice.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumEndpointSlice).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the ciliumEndpointSlice and deletes it. Returns an error if one occurs.
+func (c *ciliumEndpointSlices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("ciliumendpointslices").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *ciliumEndpointSlices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("ciliumendpointslices").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched ciliumEndpointSlice.
+func (c *ciliumEndpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumEndpointSlice, err error) {
+ result = &v2alpha1.CiliumEndpointSlice{}
+ err = c.client.Patch(pt).
+ Resource("ciliumendpointslices").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliuml2announcementpolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliuml2announcementpolicy.go
new file mode 100644
index 000000000..bed50df46
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliuml2announcementpolicy.go
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v2alpha1
+
+import (
+ "context"
+ "time"
+
+ v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
+ scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// CiliumL2AnnouncementPoliciesGetter has a method to return a CiliumL2AnnouncementPolicyInterface.
+// A group's client should implement this interface.
+type CiliumL2AnnouncementPoliciesGetter interface {
+ CiliumL2AnnouncementPolicies() CiliumL2AnnouncementPolicyInterface
+}
+
+// CiliumL2AnnouncementPolicyInterface has methods to work with CiliumL2AnnouncementPolicy resources.
+type CiliumL2AnnouncementPolicyInterface interface {
+ Create(ctx context.Context, ciliumL2AnnouncementPolicy *v2alpha1.CiliumL2AnnouncementPolicy, opts v1.CreateOptions) (*v2alpha1.CiliumL2AnnouncementPolicy, error)
+ Update(ctx context.Context, ciliumL2AnnouncementPolicy *v2alpha1.CiliumL2AnnouncementPolicy, opts v1.UpdateOptions) (*v2alpha1.CiliumL2AnnouncementPolicy, error)
+ UpdateStatus(ctx context.Context, ciliumL2AnnouncementPolicy *v2alpha1.CiliumL2AnnouncementPolicy, opts v1.UpdateOptions) (*v2alpha1.CiliumL2AnnouncementPolicy, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v2alpha1.CiliumL2AnnouncementPolicy, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v2alpha1.CiliumL2AnnouncementPolicyList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumL2AnnouncementPolicy, err error)
+ CiliumL2AnnouncementPolicyExpansion
+}
+
+// ciliumL2AnnouncementPolicies implements CiliumL2AnnouncementPolicyInterface
+type ciliumL2AnnouncementPolicies struct {
+ client rest.Interface
+}
+
+// newCiliumL2AnnouncementPolicies returns a CiliumL2AnnouncementPolicies
+func newCiliumL2AnnouncementPolicies(c *CiliumV2alpha1Client) *ciliumL2AnnouncementPolicies {
+ return &ciliumL2AnnouncementPolicies{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the ciliumL2AnnouncementPolicy, and returns the corresponding ciliumL2AnnouncementPolicy object, and an error if there is any.
+func (c *ciliumL2AnnouncementPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumL2AnnouncementPolicy, err error) {
+ result = &v2alpha1.CiliumL2AnnouncementPolicy{}
+ err = c.client.Get().
+ Resource("ciliuml2announcementpolicies").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of CiliumL2AnnouncementPolicies that match those selectors.
+func (c *ciliumL2AnnouncementPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumL2AnnouncementPolicyList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v2alpha1.CiliumL2AnnouncementPolicyList{}
+ err = c.client.Get().
+ Resource("ciliuml2announcementpolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested ciliumL2AnnouncementPolicies.
+func (c *ciliumL2AnnouncementPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("ciliuml2announcementpolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a ciliumL2AnnouncementPolicy and creates it. Returns the server's representation of the ciliumL2AnnouncementPolicy, and an error, if there is any.
+func (c *ciliumL2AnnouncementPolicies) Create(ctx context.Context, ciliumL2AnnouncementPolicy *v2alpha1.CiliumL2AnnouncementPolicy, opts v1.CreateOptions) (result *v2alpha1.CiliumL2AnnouncementPolicy, err error) {
+ result = &v2alpha1.CiliumL2AnnouncementPolicy{}
+ err = c.client.Post().
+ Resource("ciliuml2announcementpolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumL2AnnouncementPolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a ciliumL2AnnouncementPolicy and updates it. Returns the server's representation of the ciliumL2AnnouncementPolicy, and an error, if there is any.
+func (c *ciliumL2AnnouncementPolicies) Update(ctx context.Context, ciliumL2AnnouncementPolicy *v2alpha1.CiliumL2AnnouncementPolicy, opts v1.UpdateOptions) (result *v2alpha1.CiliumL2AnnouncementPolicy, err error) {
+ result = &v2alpha1.CiliumL2AnnouncementPolicy{}
+ err = c.client.Put().
+ Resource("ciliuml2announcementpolicies").
+ Name(ciliumL2AnnouncementPolicy.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumL2AnnouncementPolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *ciliumL2AnnouncementPolicies) UpdateStatus(ctx context.Context, ciliumL2AnnouncementPolicy *v2alpha1.CiliumL2AnnouncementPolicy, opts v1.UpdateOptions) (result *v2alpha1.CiliumL2AnnouncementPolicy, err error) {
+ result = &v2alpha1.CiliumL2AnnouncementPolicy{}
+ err = c.client.Put().
+ Resource("ciliuml2announcementpolicies").
+ Name(ciliumL2AnnouncementPolicy.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumL2AnnouncementPolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the ciliumL2AnnouncementPolicy and deletes it. Returns an error if one occurs.
+func (c *ciliumL2AnnouncementPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("ciliuml2announcementpolicies").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *ciliumL2AnnouncementPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("ciliuml2announcementpolicies").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched ciliumL2AnnouncementPolicy.
+func (c *ciliumL2AnnouncementPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumL2AnnouncementPolicy, err error) {
+ result = &v2alpha1.CiliumL2AnnouncementPolicy{}
+ err = c.client.Patch(pt).
+ Resource("ciliuml2announcementpolicies").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumloadbalancerippool.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumloadbalancerippool.go
new file mode 100644
index 000000000..bdc76f4a5
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumloadbalancerippool.go
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v2alpha1
+
+import (
+ "context"
+ "time"
+
+ v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
+ scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// CiliumLoadBalancerIPPoolsGetter has a method to return a CiliumLoadBalancerIPPoolInterface.
+// A group's client should implement this interface.
+type CiliumLoadBalancerIPPoolsGetter interface {
+ CiliumLoadBalancerIPPools() CiliumLoadBalancerIPPoolInterface
+}
+
+// CiliumLoadBalancerIPPoolInterface has methods to work with CiliumLoadBalancerIPPool resources.
+type CiliumLoadBalancerIPPoolInterface interface {
+ Create(ctx context.Context, ciliumLoadBalancerIPPool *v2alpha1.CiliumLoadBalancerIPPool, opts v1.CreateOptions) (*v2alpha1.CiliumLoadBalancerIPPool, error)
+ Update(ctx context.Context, ciliumLoadBalancerIPPool *v2alpha1.CiliumLoadBalancerIPPool, opts v1.UpdateOptions) (*v2alpha1.CiliumLoadBalancerIPPool, error)
+ UpdateStatus(ctx context.Context, ciliumLoadBalancerIPPool *v2alpha1.CiliumLoadBalancerIPPool, opts v1.UpdateOptions) (*v2alpha1.CiliumLoadBalancerIPPool, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v2alpha1.CiliumLoadBalancerIPPool, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v2alpha1.CiliumLoadBalancerIPPoolList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumLoadBalancerIPPool, err error)
+ CiliumLoadBalancerIPPoolExpansion
+}
+
+// ciliumLoadBalancerIPPools implements CiliumLoadBalancerIPPoolInterface
+type ciliumLoadBalancerIPPools struct {
+ client rest.Interface
+}
+
+// newCiliumLoadBalancerIPPools returns a CiliumLoadBalancerIPPools
+func newCiliumLoadBalancerIPPools(c *CiliumV2alpha1Client) *ciliumLoadBalancerIPPools {
+ return &ciliumLoadBalancerIPPools{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the ciliumLoadBalancerIPPool, and returns the corresponding ciliumLoadBalancerIPPool object, and an error if there is any.
+func (c *ciliumLoadBalancerIPPools) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumLoadBalancerIPPool, err error) {
+ result = &v2alpha1.CiliumLoadBalancerIPPool{}
+ err = c.client.Get().
+ Resource("ciliumloadbalancerippools").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of CiliumLoadBalancerIPPools that match those selectors.
+func (c *ciliumLoadBalancerIPPools) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumLoadBalancerIPPoolList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v2alpha1.CiliumLoadBalancerIPPoolList{}
+ err = c.client.Get().
+ Resource("ciliumloadbalancerippools").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested ciliumLoadBalancerIPPools.
+func (c *ciliumLoadBalancerIPPools) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("ciliumloadbalancerippools").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a ciliumLoadBalancerIPPool and creates it. Returns the server's representation of the ciliumLoadBalancerIPPool, and an error, if there is any.
+func (c *ciliumLoadBalancerIPPools) Create(ctx context.Context, ciliumLoadBalancerIPPool *v2alpha1.CiliumLoadBalancerIPPool, opts v1.CreateOptions) (result *v2alpha1.CiliumLoadBalancerIPPool, err error) {
+ result = &v2alpha1.CiliumLoadBalancerIPPool{}
+ err = c.client.Post().
+ Resource("ciliumloadbalancerippools").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumLoadBalancerIPPool).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a ciliumLoadBalancerIPPool and updates it. Returns the server's representation of the ciliumLoadBalancerIPPool, and an error, if there is any.
+func (c *ciliumLoadBalancerIPPools) Update(ctx context.Context, ciliumLoadBalancerIPPool *v2alpha1.CiliumLoadBalancerIPPool, opts v1.UpdateOptions) (result *v2alpha1.CiliumLoadBalancerIPPool, err error) {
+ result = &v2alpha1.CiliumLoadBalancerIPPool{}
+ err = c.client.Put().
+ Resource("ciliumloadbalancerippools").
+ Name(ciliumLoadBalancerIPPool.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumLoadBalancerIPPool).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *ciliumLoadBalancerIPPools) UpdateStatus(ctx context.Context, ciliumLoadBalancerIPPool *v2alpha1.CiliumLoadBalancerIPPool, opts v1.UpdateOptions) (result *v2alpha1.CiliumLoadBalancerIPPool, err error) {
+ result = &v2alpha1.CiliumLoadBalancerIPPool{}
+ err = c.client.Put().
+ Resource("ciliumloadbalancerippools").
+ Name(ciliumLoadBalancerIPPool.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumLoadBalancerIPPool).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the ciliumLoadBalancerIPPool and deletes it. Returns an error if one occurs.
+func (c *ciliumLoadBalancerIPPools) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("ciliumloadbalancerippools").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *ciliumLoadBalancerIPPools) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("ciliumloadbalancerippools").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched ciliumLoadBalancerIPPool.
+func (c *ciliumLoadBalancerIPPools) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumLoadBalancerIPPool, err error) {
+ result = &v2alpha1.CiliumLoadBalancerIPPool{}
+ err = c.client.Patch(pt).
+ Resource("ciliumloadbalancerippools").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumnodeconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumnodeconfig.go
new file mode 100644
index 000000000..bff4996a0
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumnodeconfig.go
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v2alpha1
+
+import (
+ "context"
+ "time"
+
+ v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
+ scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// CiliumNodeConfigsGetter has a method to return a CiliumNodeConfigInterface.
+// A group's client should implement this interface.
+type CiliumNodeConfigsGetter interface {
+ CiliumNodeConfigs(namespace string) CiliumNodeConfigInterface
+}
+
+// CiliumNodeConfigInterface has methods to work with CiliumNodeConfig resources.
+type CiliumNodeConfigInterface interface {
+ Create(ctx context.Context, ciliumNodeConfig *v2alpha1.CiliumNodeConfig, opts v1.CreateOptions) (*v2alpha1.CiliumNodeConfig, error)
+ Update(ctx context.Context, ciliumNodeConfig *v2alpha1.CiliumNodeConfig, opts v1.UpdateOptions) (*v2alpha1.CiliumNodeConfig, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v2alpha1.CiliumNodeConfig, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v2alpha1.CiliumNodeConfigList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumNodeConfig, err error)
+ CiliumNodeConfigExpansion
+}
+
+// ciliumNodeConfigs implements CiliumNodeConfigInterface
+type ciliumNodeConfigs struct {
+ client rest.Interface
+ ns string
+}
+
+// newCiliumNodeConfigs returns a CiliumNodeConfigs
+func newCiliumNodeConfigs(c *CiliumV2alpha1Client, namespace string) *ciliumNodeConfigs {
+ return &ciliumNodeConfigs{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the ciliumNodeConfig, and returns the corresponding ciliumNodeConfig object, and an error if there is any.
+func (c *ciliumNodeConfigs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumNodeConfig, err error) {
+ result = &v2alpha1.CiliumNodeConfig{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("ciliumnodeconfigs").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of CiliumNodeConfigs that match those selectors.
+func (c *ciliumNodeConfigs) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumNodeConfigList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v2alpha1.CiliumNodeConfigList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("ciliumnodeconfigs").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested ciliumNodeConfigs.
+func (c *ciliumNodeConfigs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("ciliumnodeconfigs").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a ciliumNodeConfig and creates it. Returns the server's representation of the ciliumNodeConfig, and an error, if there is any.
+func (c *ciliumNodeConfigs) Create(ctx context.Context, ciliumNodeConfig *v2alpha1.CiliumNodeConfig, opts v1.CreateOptions) (result *v2alpha1.CiliumNodeConfig, err error) {
+ result = &v2alpha1.CiliumNodeConfig{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("ciliumnodeconfigs").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumNodeConfig).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a ciliumNodeConfig and updates it. Returns the server's representation of the ciliumNodeConfig, and an error, if there is any.
+func (c *ciliumNodeConfigs) Update(ctx context.Context, ciliumNodeConfig *v2alpha1.CiliumNodeConfig, opts v1.UpdateOptions) (result *v2alpha1.CiliumNodeConfig, err error) {
+ result = &v2alpha1.CiliumNodeConfig{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("ciliumnodeconfigs").
+ Name(ciliumNodeConfig.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumNodeConfig).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the ciliumNodeConfig and deletes it. Returns an error if one occurs.
+func (c *ciliumNodeConfigs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("ciliumnodeconfigs").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *ciliumNodeConfigs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("ciliumnodeconfigs").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched ciliumNodeConfig.
+func (c *ciliumNodeConfigs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumNodeConfig, err error) {
+ result = &v2alpha1.CiliumNodeConfig{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("ciliumnodeconfigs").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumpodippool.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumpodippool.go
new file mode 100644
index 000000000..024442952
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumpodippool.go
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v2alpha1
+
+import (
+ "context"
+ "time"
+
+ v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
+ scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// CiliumPodIPPoolsGetter has a method to return a CiliumPodIPPoolInterface.
+// A group's client should implement this interface.
+type CiliumPodIPPoolsGetter interface {
+ CiliumPodIPPools() CiliumPodIPPoolInterface
+}
+
+// CiliumPodIPPoolInterface has methods to work with CiliumPodIPPool resources.
+type CiliumPodIPPoolInterface interface {
+ Create(ctx context.Context, ciliumPodIPPool *v2alpha1.CiliumPodIPPool, opts v1.CreateOptions) (*v2alpha1.CiliumPodIPPool, error)
+ Update(ctx context.Context, ciliumPodIPPool *v2alpha1.CiliumPodIPPool, opts v1.UpdateOptions) (*v2alpha1.CiliumPodIPPool, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v2alpha1.CiliumPodIPPool, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v2alpha1.CiliumPodIPPoolList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumPodIPPool, err error)
+ CiliumPodIPPoolExpansion
+}
+
+// ciliumPodIPPools implements CiliumPodIPPoolInterface
+type ciliumPodIPPools struct {
+ client rest.Interface
+}
+
+// newCiliumPodIPPools returns a CiliumPodIPPools
+func newCiliumPodIPPools(c *CiliumV2alpha1Client) *ciliumPodIPPools {
+ return &ciliumPodIPPools{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the ciliumPodIPPool, and returns the corresponding ciliumPodIPPool object, and an error if there is any.
+func (c *ciliumPodIPPools) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumPodIPPool, err error) {
+ result = &v2alpha1.CiliumPodIPPool{}
+ err = c.client.Get().
+ Resource("ciliumpodippools").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of CiliumPodIPPools that match those selectors.
+func (c *ciliumPodIPPools) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumPodIPPoolList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v2alpha1.CiliumPodIPPoolList{}
+ err = c.client.Get().
+ Resource("ciliumpodippools").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested ciliumPodIPPools.
+func (c *ciliumPodIPPools) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("ciliumpodippools").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a ciliumPodIPPool and creates it. Returns the server's representation of the ciliumPodIPPool, and an error, if there is any.
+func (c *ciliumPodIPPools) Create(ctx context.Context, ciliumPodIPPool *v2alpha1.CiliumPodIPPool, opts v1.CreateOptions) (result *v2alpha1.CiliumPodIPPool, err error) {
+ result = &v2alpha1.CiliumPodIPPool{}
+ err = c.client.Post().
+ Resource("ciliumpodippools").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumPodIPPool).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a ciliumPodIPPool and updates it. Returns the server's representation of the ciliumPodIPPool, and an error, if there is any.
+func (c *ciliumPodIPPools) Update(ctx context.Context, ciliumPodIPPool *v2alpha1.CiliumPodIPPool, opts v1.UpdateOptions) (result *v2alpha1.CiliumPodIPPool, err error) {
+ result = &v2alpha1.CiliumPodIPPool{}
+ err = c.client.Put().
+ Resource("ciliumpodippools").
+ Name(ciliumPodIPPool.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ciliumPodIPPool).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the ciliumPodIPPool and deletes it. Returns an error if one occurs.
+func (c *ciliumPodIPPools) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("ciliumpodippools").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *ciliumPodIPPools) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("ciliumpodippools").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched ciliumPodIPPool.
+func (c *ciliumPodIPPools) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumPodIPPool, err error) {
+ result = &v2alpha1.CiliumPodIPPool{}
+ err = c.client.Patch(pt).
+ Resource("ciliumpodippools").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/doc.go
new file mode 100644
index 000000000..0caa68e33
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/doc.go
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v2alpha1
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/doc.go
new file mode 100644
index 000000000..57bd090ef
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/doc.go
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_cilium.io_client.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_cilium.io_client.go
new file mode 100644
index 000000000..4708a58f3
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_cilium.io_client.go
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v2alpha1 "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeCiliumV2alpha1 struct {
+ *testing.Fake
+}
+
+func (c *FakeCiliumV2alpha1) CiliumBGPPeeringPolicies() v2alpha1.CiliumBGPPeeringPolicyInterface {
+ return &FakeCiliumBGPPeeringPolicies{c}
+}
+
+func (c *FakeCiliumV2alpha1) CiliumCIDRGroups() v2alpha1.CiliumCIDRGroupInterface {
+ return &FakeCiliumCIDRGroups{c}
+}
+
+func (c *FakeCiliumV2alpha1) CiliumEndpointSlices() v2alpha1.CiliumEndpointSliceInterface {
+ return &FakeCiliumEndpointSlices{c}
+}
+
+func (c *FakeCiliumV2alpha1) CiliumL2AnnouncementPolicies() v2alpha1.CiliumL2AnnouncementPolicyInterface {
+ return &FakeCiliumL2AnnouncementPolicies{c}
+}
+
+func (c *FakeCiliumV2alpha1) CiliumLoadBalancerIPPools() v2alpha1.CiliumLoadBalancerIPPoolInterface {
+ return &FakeCiliumLoadBalancerIPPools{c}
+}
+
+func (c *FakeCiliumV2alpha1) CiliumNodeConfigs(namespace string) v2alpha1.CiliumNodeConfigInterface {
+ return &FakeCiliumNodeConfigs{c, namespace}
+}
+
+func (c *FakeCiliumV2alpha1) CiliumPodIPPools() v2alpha1.CiliumPodIPPoolInterface {
+ return &FakeCiliumPodIPPools{c}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeCiliumV2alpha1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumbgppeeringpolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumbgppeeringpolicy.go
new file mode 100644
index 000000000..9da1136d4
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumbgppeeringpolicy.go
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeCiliumBGPPeeringPolicies implements CiliumBGPPeeringPolicyInterface
+type FakeCiliumBGPPeeringPolicies struct {
+ Fake *FakeCiliumV2alpha1
+}
+
+var ciliumbgppeeringpoliciesResource = v2alpha1.SchemeGroupVersion.WithResource("ciliumbgppeeringpolicies")
+
+var ciliumbgppeeringpoliciesKind = v2alpha1.SchemeGroupVersion.WithKind("CiliumBGPPeeringPolicy")
+
+// Get takes name of the ciliumBGPPeeringPolicy, and returns the corresponding ciliumBGPPeeringPolicy object, and an error if there is any.
+func (c *FakeCiliumBGPPeeringPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumBGPPeeringPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(ciliumbgppeeringpoliciesResource, name), &v2alpha1.CiliumBGPPeeringPolicy{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2alpha1.CiliumBGPPeeringPolicy), err
+}
+
+// List takes label and field selectors, and returns the list of CiliumBGPPeeringPolicies that match those selectors.
+func (c *FakeCiliumBGPPeeringPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumBGPPeeringPolicyList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(ciliumbgppeeringpoliciesResource, ciliumbgppeeringpoliciesKind, opts), &v2alpha1.CiliumBGPPeeringPolicyList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v2alpha1.CiliumBGPPeeringPolicyList{ListMeta: obj.(*v2alpha1.CiliumBGPPeeringPolicyList).ListMeta}
+ for _, item := range obj.(*v2alpha1.CiliumBGPPeeringPolicyList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested ciliumBGPPeeringPolicies.
+func (c *FakeCiliumBGPPeeringPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(ciliumbgppeeringpoliciesResource, opts))
+}
+
+// Create takes the representation of a ciliumBGPPeeringPolicy and creates it. Returns the server's representation of the ciliumBGPPeeringPolicy, and an error, if there is any.
+func (c *FakeCiliumBGPPeeringPolicies) Create(ctx context.Context, ciliumBGPPeeringPolicy *v2alpha1.CiliumBGPPeeringPolicy, opts v1.CreateOptions) (result *v2alpha1.CiliumBGPPeeringPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(ciliumbgppeeringpoliciesResource, ciliumBGPPeeringPolicy), &v2alpha1.CiliumBGPPeeringPolicy{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2alpha1.CiliumBGPPeeringPolicy), err
+}
+
+// Update takes the representation of a ciliumBGPPeeringPolicy and updates it. Returns the server's representation of the ciliumBGPPeeringPolicy, and an error, if there is any.
+func (c *FakeCiliumBGPPeeringPolicies) Update(ctx context.Context, ciliumBGPPeeringPolicy *v2alpha1.CiliumBGPPeeringPolicy, opts v1.UpdateOptions) (result *v2alpha1.CiliumBGPPeeringPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(ciliumbgppeeringpoliciesResource, ciliumBGPPeeringPolicy), &v2alpha1.CiliumBGPPeeringPolicy{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2alpha1.CiliumBGPPeeringPolicy), err
+}
+
+// Delete takes name of the ciliumBGPPeeringPolicy and deletes it. Returns an error if one occurs.
+func (c *FakeCiliumBGPPeeringPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteActionWithOptions(ciliumbgppeeringpoliciesResource, name, opts), &v2alpha1.CiliumBGPPeeringPolicy{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeCiliumBGPPeeringPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(ciliumbgppeeringpoliciesResource, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v2alpha1.CiliumBGPPeeringPolicyList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched ciliumBGPPeeringPolicy.
+func (c *FakeCiliumBGPPeeringPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumBGPPeeringPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(ciliumbgppeeringpoliciesResource, name, pt, data, subresources...), &v2alpha1.CiliumBGPPeeringPolicy{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2alpha1.CiliumBGPPeeringPolicy), err
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumcidrgroup.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumcidrgroup.go
new file mode 100644
index 000000000..ec271bbc4
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumcidrgroup.go
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeCiliumCIDRGroups implements CiliumCIDRGroupInterface
+type FakeCiliumCIDRGroups struct {
+ Fake *FakeCiliumV2alpha1
+}
+
+var ciliumcidrgroupsResource = v2alpha1.SchemeGroupVersion.WithResource("ciliumcidrgroups")
+
+var ciliumcidrgroupsKind = v2alpha1.SchemeGroupVersion.WithKind("CiliumCIDRGroup")
+
+// Get takes name of the ciliumCIDRGroup, and returns the corresponding ciliumCIDRGroup object, and an error if there is any.
+func (c *FakeCiliumCIDRGroups) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumCIDRGroup, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(ciliumcidrgroupsResource, name), &v2alpha1.CiliumCIDRGroup{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2alpha1.CiliumCIDRGroup), err
+}
+
+// List takes label and field selectors, and returns the list of CiliumCIDRGroups that match those selectors.
+func (c *FakeCiliumCIDRGroups) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumCIDRGroupList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(ciliumcidrgroupsResource, ciliumcidrgroupsKind, opts), &v2alpha1.CiliumCIDRGroupList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v2alpha1.CiliumCIDRGroupList{ListMeta: obj.(*v2alpha1.CiliumCIDRGroupList).ListMeta}
+ for _, item := range obj.(*v2alpha1.CiliumCIDRGroupList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested ciliumCIDRGroups.
+func (c *FakeCiliumCIDRGroups) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(ciliumcidrgroupsResource, opts))
+}
+
+// Create takes the representation of a ciliumCIDRGroup and creates it. Returns the server's representation of the ciliumCIDRGroup, and an error, if there is any.
+func (c *FakeCiliumCIDRGroups) Create(ctx context.Context, ciliumCIDRGroup *v2alpha1.CiliumCIDRGroup, opts v1.CreateOptions) (result *v2alpha1.CiliumCIDRGroup, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(ciliumcidrgroupsResource, ciliumCIDRGroup), &v2alpha1.CiliumCIDRGroup{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2alpha1.CiliumCIDRGroup), err
+}
+
+// Update takes the representation of a ciliumCIDRGroup and updates it. Returns the server's representation of the ciliumCIDRGroup, and an error, if there is any.
+func (c *FakeCiliumCIDRGroups) Update(ctx context.Context, ciliumCIDRGroup *v2alpha1.CiliumCIDRGroup, opts v1.UpdateOptions) (result *v2alpha1.CiliumCIDRGroup, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(ciliumcidrgroupsResource, ciliumCIDRGroup), &v2alpha1.CiliumCIDRGroup{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2alpha1.CiliumCIDRGroup), err
+}
+
+// Delete takes name of the ciliumCIDRGroup and deletes it. Returns an error if one occurs.
+func (c *FakeCiliumCIDRGroups) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteActionWithOptions(ciliumcidrgroupsResource, name, opts), &v2alpha1.CiliumCIDRGroup{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeCiliumCIDRGroups) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(ciliumcidrgroupsResource, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v2alpha1.CiliumCIDRGroupList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched ciliumCIDRGroup.
+func (c *FakeCiliumCIDRGroups) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumCIDRGroup, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(ciliumcidrgroupsResource, name, pt, data, subresources...), &v2alpha1.CiliumCIDRGroup{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2alpha1.CiliumCIDRGroup), err
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumendpointslice.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumendpointslice.go
new file mode 100644
index 000000000..447afbdde
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumendpointslice.go
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeCiliumEndpointSlices implements CiliumEndpointSliceInterface
+type FakeCiliumEndpointSlices struct {
+ Fake *FakeCiliumV2alpha1
+}
+
+var ciliumendpointslicesResource = v2alpha1.SchemeGroupVersion.WithResource("ciliumendpointslices")
+
+var ciliumendpointslicesKind = v2alpha1.SchemeGroupVersion.WithKind("CiliumEndpointSlice")
+
+// Get takes name of the ciliumEndpointSlice, and returns the corresponding ciliumEndpointSlice object, and an error if there is any.
+func (c *FakeCiliumEndpointSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumEndpointSlice, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(ciliumendpointslicesResource, name), &v2alpha1.CiliumEndpointSlice{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2alpha1.CiliumEndpointSlice), err
+}
+
+// List takes label and field selectors, and returns the list of CiliumEndpointSlices that match those selectors.
+func (c *FakeCiliumEndpointSlices) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumEndpointSliceList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(ciliumendpointslicesResource, ciliumendpointslicesKind, opts), &v2alpha1.CiliumEndpointSliceList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v2alpha1.CiliumEndpointSliceList{ListMeta: obj.(*v2alpha1.CiliumEndpointSliceList).ListMeta}
+ for _, item := range obj.(*v2alpha1.CiliumEndpointSliceList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested ciliumEndpointSlices.
+func (c *FakeCiliumEndpointSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(ciliumendpointslicesResource, opts))
+}
+
+// Create takes the representation of a ciliumEndpointSlice and creates it. Returns the server's representation of the ciliumEndpointSlice, and an error, if there is any.
+func (c *FakeCiliumEndpointSlices) Create(ctx context.Context, ciliumEndpointSlice *v2alpha1.CiliumEndpointSlice, opts v1.CreateOptions) (result *v2alpha1.CiliumEndpointSlice, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(ciliumendpointslicesResource, ciliumEndpointSlice), &v2alpha1.CiliumEndpointSlice{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2alpha1.CiliumEndpointSlice), err
+}
+
+// Update takes the representation of a ciliumEndpointSlice and updates it. Returns the server's representation of the ciliumEndpointSlice, and an error, if there is any.
+func (c *FakeCiliumEndpointSlices) Update(ctx context.Context, ciliumEndpointSlice *v2alpha1.CiliumEndpointSlice, opts v1.UpdateOptions) (result *v2alpha1.CiliumEndpointSlice, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(ciliumendpointslicesResource, ciliumEndpointSlice), &v2alpha1.CiliumEndpointSlice{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2alpha1.CiliumEndpointSlice), err
+}
+
+// Delete takes name of the ciliumEndpointSlice and deletes it. Returns an error if one occurs.
+func (c *FakeCiliumEndpointSlices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteActionWithOptions(ciliumendpointslicesResource, name, opts), &v2alpha1.CiliumEndpointSlice{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeCiliumEndpointSlices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(ciliumendpointslicesResource, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v2alpha1.CiliumEndpointSliceList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched ciliumEndpointSlice.
+func (c *FakeCiliumEndpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumEndpointSlice, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(ciliumendpointslicesResource, name, pt, data, subresources...), &v2alpha1.CiliumEndpointSlice{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2alpha1.CiliumEndpointSlice), err
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliuml2announcementpolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliuml2announcementpolicy.go
new file mode 100644
index 000000000..2ad271efc
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliuml2announcementpolicy.go
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeCiliumL2AnnouncementPolicies implements CiliumL2AnnouncementPolicyInterface
+type FakeCiliumL2AnnouncementPolicies struct {
+ Fake *FakeCiliumV2alpha1
+}
+
+var ciliuml2announcementpoliciesResource = v2alpha1.SchemeGroupVersion.WithResource("ciliuml2announcementpolicies")
+
+var ciliuml2announcementpoliciesKind = v2alpha1.SchemeGroupVersion.WithKind("CiliumL2AnnouncementPolicy")
+
+// Get takes name of the ciliumL2AnnouncementPolicy, and returns the corresponding ciliumL2AnnouncementPolicy object, and an error if there is any.
+func (c *FakeCiliumL2AnnouncementPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumL2AnnouncementPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(ciliuml2announcementpoliciesResource, name), &v2alpha1.CiliumL2AnnouncementPolicy{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2alpha1.CiliumL2AnnouncementPolicy), err
+}
+
+// List takes label and field selectors, and returns the list of CiliumL2AnnouncementPolicies that match those selectors.
+func (c *FakeCiliumL2AnnouncementPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumL2AnnouncementPolicyList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(ciliuml2announcementpoliciesResource, ciliuml2announcementpoliciesKind, opts), &v2alpha1.CiliumL2AnnouncementPolicyList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v2alpha1.CiliumL2AnnouncementPolicyList{ListMeta: obj.(*v2alpha1.CiliumL2AnnouncementPolicyList).ListMeta}
+ for _, item := range obj.(*v2alpha1.CiliumL2AnnouncementPolicyList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested ciliumL2AnnouncementPolicies.
+func (c *FakeCiliumL2AnnouncementPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(ciliuml2announcementpoliciesResource, opts))
+}
+
+// Create takes the representation of a ciliumL2AnnouncementPolicy and creates it. Returns the server's representation of the ciliumL2AnnouncementPolicy, and an error, if there is any.
+func (c *FakeCiliumL2AnnouncementPolicies) Create(ctx context.Context, ciliumL2AnnouncementPolicy *v2alpha1.CiliumL2AnnouncementPolicy, opts v1.CreateOptions) (result *v2alpha1.CiliumL2AnnouncementPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(ciliuml2announcementpoliciesResource, ciliumL2AnnouncementPolicy), &v2alpha1.CiliumL2AnnouncementPolicy{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2alpha1.CiliumL2AnnouncementPolicy), err
+}
+
+// Update takes the representation of a ciliumL2AnnouncementPolicy and updates it. Returns the server's representation of the ciliumL2AnnouncementPolicy, and an error, if there is any.
+func (c *FakeCiliumL2AnnouncementPolicies) Update(ctx context.Context, ciliumL2AnnouncementPolicy *v2alpha1.CiliumL2AnnouncementPolicy, opts v1.UpdateOptions) (result *v2alpha1.CiliumL2AnnouncementPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(ciliuml2announcementpoliciesResource, ciliumL2AnnouncementPolicy), &v2alpha1.CiliumL2AnnouncementPolicy{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2alpha1.CiliumL2AnnouncementPolicy), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeCiliumL2AnnouncementPolicies) UpdateStatus(ctx context.Context, ciliumL2AnnouncementPolicy *v2alpha1.CiliumL2AnnouncementPolicy, opts v1.UpdateOptions) (*v2alpha1.CiliumL2AnnouncementPolicy, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateSubresourceAction(ciliuml2announcementpoliciesResource, "status", ciliumL2AnnouncementPolicy), &v2alpha1.CiliumL2AnnouncementPolicy{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2alpha1.CiliumL2AnnouncementPolicy), err
+}
+
+// Delete takes name of the ciliumL2AnnouncementPolicy and deletes it. Returns an error if one occurs.
+func (c *FakeCiliumL2AnnouncementPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteActionWithOptions(ciliuml2announcementpoliciesResource, name, opts), &v2alpha1.CiliumL2AnnouncementPolicy{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeCiliumL2AnnouncementPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(ciliuml2announcementpoliciesResource, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v2alpha1.CiliumL2AnnouncementPolicyList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched ciliumL2AnnouncementPolicy.
+func (c *FakeCiliumL2AnnouncementPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumL2AnnouncementPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(ciliuml2announcementpoliciesResource, name, pt, data, subresources...), &v2alpha1.CiliumL2AnnouncementPolicy{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2alpha1.CiliumL2AnnouncementPolicy), err
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumloadbalancerippool.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumloadbalancerippool.go
new file mode 100644
index 000000000..b150f24fa
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumloadbalancerippool.go
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeCiliumLoadBalancerIPPools implements CiliumLoadBalancerIPPoolInterface
+type FakeCiliumLoadBalancerIPPools struct {
+ Fake *FakeCiliumV2alpha1
+}
+
+var ciliumloadbalancerippoolsResource = v2alpha1.SchemeGroupVersion.WithResource("ciliumloadbalancerippools")
+
+var ciliumloadbalancerippoolsKind = v2alpha1.SchemeGroupVersion.WithKind("CiliumLoadBalancerIPPool")
+
+// Get takes name of the ciliumLoadBalancerIPPool, and returns the corresponding ciliumLoadBalancerIPPool object, and an error if there is any.
+func (c *FakeCiliumLoadBalancerIPPools) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumLoadBalancerIPPool, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(ciliumloadbalancerippoolsResource, name), &v2alpha1.CiliumLoadBalancerIPPool{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2alpha1.CiliumLoadBalancerIPPool), err
+}
+
+// List takes label and field selectors, and returns the list of CiliumLoadBalancerIPPools that match those selectors.
+func (c *FakeCiliumLoadBalancerIPPools) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumLoadBalancerIPPoolList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(ciliumloadbalancerippoolsResource, ciliumloadbalancerippoolsKind, opts), &v2alpha1.CiliumLoadBalancerIPPoolList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v2alpha1.CiliumLoadBalancerIPPoolList{ListMeta: obj.(*v2alpha1.CiliumLoadBalancerIPPoolList).ListMeta}
+ for _, item := range obj.(*v2alpha1.CiliumLoadBalancerIPPoolList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested ciliumLoadBalancerIPPools.
+func (c *FakeCiliumLoadBalancerIPPools) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(ciliumloadbalancerippoolsResource, opts))
+}
+
+// Create takes the representation of a ciliumLoadBalancerIPPool and creates it. Returns the server's representation of the ciliumLoadBalancerIPPool, and an error, if there is any.
+func (c *FakeCiliumLoadBalancerIPPools) Create(ctx context.Context, ciliumLoadBalancerIPPool *v2alpha1.CiliumLoadBalancerIPPool, opts v1.CreateOptions) (result *v2alpha1.CiliumLoadBalancerIPPool, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(ciliumloadbalancerippoolsResource, ciliumLoadBalancerIPPool), &v2alpha1.CiliumLoadBalancerIPPool{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2alpha1.CiliumLoadBalancerIPPool), err
+}
+
+// Update takes the representation of a ciliumLoadBalancerIPPool and updates it. Returns the server's representation of the ciliumLoadBalancerIPPool, and an error, if there is any.
+func (c *FakeCiliumLoadBalancerIPPools) Update(ctx context.Context, ciliumLoadBalancerIPPool *v2alpha1.CiliumLoadBalancerIPPool, opts v1.UpdateOptions) (result *v2alpha1.CiliumLoadBalancerIPPool, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(ciliumloadbalancerippoolsResource, ciliumLoadBalancerIPPool), &v2alpha1.CiliumLoadBalancerIPPool{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2alpha1.CiliumLoadBalancerIPPool), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeCiliumLoadBalancerIPPools) UpdateStatus(ctx context.Context, ciliumLoadBalancerIPPool *v2alpha1.CiliumLoadBalancerIPPool, opts v1.UpdateOptions) (*v2alpha1.CiliumLoadBalancerIPPool, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateSubresourceAction(ciliumloadbalancerippoolsResource, "status", ciliumLoadBalancerIPPool), &v2alpha1.CiliumLoadBalancerIPPool{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2alpha1.CiliumLoadBalancerIPPool), err
+}
+
+// Delete takes name of the ciliumLoadBalancerIPPool and deletes it. Returns an error if one occurs.
+func (c *FakeCiliumLoadBalancerIPPools) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteActionWithOptions(ciliumloadbalancerippoolsResource, name, opts), &v2alpha1.CiliumLoadBalancerIPPool{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeCiliumLoadBalancerIPPools) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(ciliumloadbalancerippoolsResource, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v2alpha1.CiliumLoadBalancerIPPoolList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched ciliumLoadBalancerIPPool.
+func (c *FakeCiliumLoadBalancerIPPools) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumLoadBalancerIPPool, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(ciliumloadbalancerippoolsResource, name, pt, data, subresources...), &v2alpha1.CiliumLoadBalancerIPPool{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2alpha1.CiliumLoadBalancerIPPool), err
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumnodeconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumnodeconfig.go
new file mode 100644
index 000000000..a826f0bc0
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumnodeconfig.go
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeCiliumNodeConfigs implements CiliumNodeConfigInterface
+type FakeCiliumNodeConfigs struct {
+ Fake *FakeCiliumV2alpha1
+ ns string
+}
+
+var ciliumnodeconfigsResource = v2alpha1.SchemeGroupVersion.WithResource("ciliumnodeconfigs")
+
+var ciliumnodeconfigsKind = v2alpha1.SchemeGroupVersion.WithKind("CiliumNodeConfig")
+
+// Get takes name of the ciliumNodeConfig, and returns the corresponding ciliumNodeConfig object, and an error if there is any.
+func (c *FakeCiliumNodeConfigs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumNodeConfig, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(ciliumnodeconfigsResource, c.ns, name), &v2alpha1.CiliumNodeConfig{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2alpha1.CiliumNodeConfig), err
+}
+
+// List takes label and field selectors, and returns the list of CiliumNodeConfigs that match those selectors.
+func (c *FakeCiliumNodeConfigs) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumNodeConfigList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(ciliumnodeconfigsResource, ciliumnodeconfigsKind, c.ns, opts), &v2alpha1.CiliumNodeConfigList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v2alpha1.CiliumNodeConfigList{ListMeta: obj.(*v2alpha1.CiliumNodeConfigList).ListMeta}
+ for _, item := range obj.(*v2alpha1.CiliumNodeConfigList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested ciliumNodeConfigs.
+func (c *FakeCiliumNodeConfigs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(ciliumnodeconfigsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a ciliumNodeConfig and creates it. Returns the server's representation of the ciliumNodeConfig, and an error, if there is any.
+func (c *FakeCiliumNodeConfigs) Create(ctx context.Context, ciliumNodeConfig *v2alpha1.CiliumNodeConfig, opts v1.CreateOptions) (result *v2alpha1.CiliumNodeConfig, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(ciliumnodeconfigsResource, c.ns, ciliumNodeConfig), &v2alpha1.CiliumNodeConfig{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2alpha1.CiliumNodeConfig), err
+}
+
+// Update takes the representation of a ciliumNodeConfig and updates it. Returns the server's representation of the ciliumNodeConfig, and an error, if there is any.
+func (c *FakeCiliumNodeConfigs) Update(ctx context.Context, ciliumNodeConfig *v2alpha1.CiliumNodeConfig, opts v1.UpdateOptions) (result *v2alpha1.CiliumNodeConfig, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(ciliumnodeconfigsResource, c.ns, ciliumNodeConfig), &v2alpha1.CiliumNodeConfig{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2alpha1.CiliumNodeConfig), err
+}
+
+// Delete takes name of the ciliumNodeConfig and deletes it. Returns an error if one occurs.
+func (c *FakeCiliumNodeConfigs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteActionWithOptions(ciliumnodeconfigsResource, c.ns, name, opts), &v2alpha1.CiliumNodeConfig{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeCiliumNodeConfigs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(ciliumnodeconfigsResource, c.ns, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v2alpha1.CiliumNodeConfigList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched ciliumNodeConfig.
+func (c *FakeCiliumNodeConfigs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumNodeConfig, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(ciliumnodeconfigsResource, c.ns, name, pt, data, subresources...), &v2alpha1.CiliumNodeConfig{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2alpha1.CiliumNodeConfig), err
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumpodippool.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumpodippool.go
new file mode 100644
index 000000000..6922a8582
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake/fake_ciliumpodippool.go
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeCiliumPodIPPools implements CiliumPodIPPoolInterface
+type FakeCiliumPodIPPools struct {
+ Fake *FakeCiliumV2alpha1
+}
+
+var ciliumpodippoolsResource = v2alpha1.SchemeGroupVersion.WithResource("ciliumpodippools")
+
+var ciliumpodippoolsKind = v2alpha1.SchemeGroupVersion.WithKind("CiliumPodIPPool")
+
+// Get takes name of the ciliumPodIPPool, and returns the corresponding ciliumPodIPPool object, and an error if there is any.
+func (c *FakeCiliumPodIPPools) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumPodIPPool, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(ciliumpodippoolsResource, name), &v2alpha1.CiliumPodIPPool{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2alpha1.CiliumPodIPPool), err
+}
+
+// List takes label and field selectors, and returns the list of CiliumPodIPPools that match those selectors.
+func (c *FakeCiliumPodIPPools) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumPodIPPoolList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(ciliumpodippoolsResource, ciliumpodippoolsKind, opts), &v2alpha1.CiliumPodIPPoolList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v2alpha1.CiliumPodIPPoolList{ListMeta: obj.(*v2alpha1.CiliumPodIPPoolList).ListMeta}
+ for _, item := range obj.(*v2alpha1.CiliumPodIPPoolList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested ciliumPodIPPools.
+func (c *FakeCiliumPodIPPools) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(ciliumpodippoolsResource, opts))
+}
+
+// Create takes the representation of a ciliumPodIPPool and creates it. Returns the server's representation of the ciliumPodIPPool, and an error, if there is any.
+func (c *FakeCiliumPodIPPools) Create(ctx context.Context, ciliumPodIPPool *v2alpha1.CiliumPodIPPool, opts v1.CreateOptions) (result *v2alpha1.CiliumPodIPPool, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(ciliumpodippoolsResource, ciliumPodIPPool), &v2alpha1.CiliumPodIPPool{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2alpha1.CiliumPodIPPool), err
+}
+
+// Update takes the representation of a ciliumPodIPPool and updates it. Returns the server's representation of the ciliumPodIPPool, and an error, if there is any.
+func (c *FakeCiliumPodIPPools) Update(ctx context.Context, ciliumPodIPPool *v2alpha1.CiliumPodIPPool, opts v1.UpdateOptions) (result *v2alpha1.CiliumPodIPPool, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(ciliumpodippoolsResource, ciliumPodIPPool), &v2alpha1.CiliumPodIPPool{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2alpha1.CiliumPodIPPool), err
+}
+
+// Delete takes name of the ciliumPodIPPool and deletes it. Returns an error if one occurs.
+func (c *FakeCiliumPodIPPools) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteActionWithOptions(ciliumpodippoolsResource, name, opts), &v2alpha1.CiliumPodIPPool{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeCiliumPodIPPools) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(ciliumpodippoolsResource, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v2alpha1.CiliumPodIPPoolList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched ciliumPodIPPool.
+func (c *FakeCiliumPodIPPools) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumPodIPPool, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(ciliumpodippoolsResource, name, pt, data, subresources...), &v2alpha1.CiliumPodIPPool{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v2alpha1.CiliumPodIPPool), err
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/generated_expansion.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/generated_expansion.go
new file mode 100644
index 000000000..7dc4b1076
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/generated_expansion.go
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v2alpha1
+
+type CiliumBGPPeeringPolicyExpansion interface{}
+
+type CiliumCIDRGroupExpansion interface{}
+
+type CiliumEndpointSliceExpansion interface{}
+
+type CiliumL2AnnouncementPolicyExpansion interface{}
+
+type CiliumLoadBalancerIPPoolExpansion interface{}
+
+type CiliumNodeConfigExpansion interface{}
+
+type CiliumPodIPPoolExpansion interface{}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/config.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/config.go
new file mode 100644
index 000000000..ac685bb4b
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/config.go
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package client
+
+import (
+ "os"
+ "time"
+
+ "github.com/spf13/pflag"
+
+ "github.com/cilium/cilium/pkg/defaults"
+ "github.com/cilium/cilium/pkg/option"
+)
+
+type Config struct {
+ // EnableK8s is a flag that, when set to false, forcibly disables the clientset, to let cilium
+ // operates with CNI-compatible orchestrators other than Kubernetes. Default to true.
+ EnableK8s bool
+
+ // K8sAPIServer is the kubernetes api address server (for https use --k8s-kubeconfig-path instead)
+ K8sAPIServer string
+
+ // K8sKubeConfigPath is the absolute path of the kubernetes kubeconfig file
+ K8sKubeConfigPath string
+
+ // K8sClientQPS is the queries per second limit for the K8s client. Defaults to k8s client defaults.
+ K8sClientQPS float32
+
+ // K8sClientBurst is the burst value allowed for the K8s client. Defaults to k8s client defaults.
+ K8sClientBurst int
+
+ // K8sHeartbeatTimeout configures the timeout for apiserver heartbeat
+ K8sHeartbeatTimeout time.Duration
+
+ // K8sEnableAPIDiscovery enables Kubernetes API discovery
+ EnableK8sAPIDiscovery bool
+}
+
+var defaultConfig = Config{
+ EnableK8s: true,
+ K8sAPIServer: "",
+ K8sKubeConfigPath: "",
+ K8sClientQPS: defaults.K8sClientQPSLimit,
+ K8sClientBurst: defaults.K8sClientBurst,
+ K8sHeartbeatTimeout: 30 * time.Second,
+ EnableK8sAPIDiscovery: defaults.K8sEnableAPIDiscovery,
+}
+
+func (def Config) Flags(flags *pflag.FlagSet) {
+ flags.Bool(option.EnableK8s, def.EnableK8s, "Enable the k8s clientset")
+ flags.String(option.K8sAPIServer, def.K8sAPIServer, "Kubernetes API server URL")
+ flags.String(option.K8sKubeConfigPath, def.K8sKubeConfigPath, "Absolute path of the kubernetes kubeconfig file")
+ flags.Float32(option.K8sClientQPSLimit, def.K8sClientQPS, "Queries per second limit for the K8s client")
+ flags.Int(option.K8sClientBurst, def.K8sClientBurst, "Burst value allowed for the K8s client")
+ flags.Duration(option.K8sHeartbeatTimeout, def.K8sHeartbeatTimeout, "Configures the timeout for api-server heartbeat, set to 0 to disable")
+ flags.Bool(option.K8sEnableAPIDiscovery, def.EnableK8sAPIDiscovery, "Enable discovery of Kubernetes API groups and resources with the discovery API")
+}
+
+func (cfg Config) isEnabled() bool {
+ if !cfg.EnableK8s {
+ return false
+ }
+ return cfg.K8sAPIServer != "" ||
+ cfg.K8sKubeConfigPath != "" ||
+ (os.Getenv("KUBERNETES_SERVICE_HOST") != "" &&
+ os.Getenv("KUBERNETES_SERVICE_PORT") != "") ||
+ os.Getenv("K8S_NODE_NAME") != ""
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/getters.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/getters.go
new file mode 100644
index 000000000..fc4994000
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/getters.go
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package client
+
+import (
+ "context"
+ "fmt"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ cilium_v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
+ slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
+)
+
+// Getters is a set of methods for retrieving common objects.
+type Getters interface {
+ GetSecrets(ctx context.Context, namespace, name string) (map[string][]byte, error)
+ GetK8sNode(ctx context.Context, nodeName string) (*slim_corev1.Node, error)
+ GetCiliumNode(ctx context.Context, nodeName string) (*cilium_v2.CiliumNode, error)
+}
+
+// clientsetGetters implements the Getters interface in terms of the clientset.
+type clientsetGetters struct {
+ Clientset
+}
+
+// GetSecrets returns the secrets found in the given namespace and name.
+func (cs *clientsetGetters) GetSecrets(ctx context.Context, ns, name string) (map[string][]byte, error) {
+ if !cs.IsEnabled() {
+ return nil, fmt.Errorf("GetSecrets: No k8s, cannot access k8s secrets")
+ }
+
+ result, err := cs.CoreV1().Secrets(ns).Get(ctx, name, metav1.GetOptions{})
+ if err != nil {
+ return nil, err
+ }
+ return result.Data, nil
+}
+
+// GetK8sNode returns the node with the given nodeName.
+func (cs *clientsetGetters) GetK8sNode(ctx context.Context, nodeName string) (*slim_corev1.Node, error) {
+ if !cs.IsEnabled() {
+ return nil, fmt.Errorf("GetK8sNode: No k8s, cannot access k8s nodes")
+ }
+
+ return cs.Slim().CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
+}
+
+// GetCiliumNode returns the CiliumNode with the given nodeName.
+func (cs *clientsetGetters) GetCiliumNode(ctx context.Context, nodeName string) (*cilium_v2.CiliumNode, error) {
+ if !cs.IsEnabled() {
+	return nil, fmt.Errorf("GetCiliumNode: No k8s, cannot access CiliumNode resources")
+ }
+
+ return cs.CiliumV2().CiliumNodes().Get(ctx, nodeName, metav1.GetOptions{})
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/constants/const.go b/vendor/github.com/cilium/cilium/pkg/k8s/constants/const.go
new file mode 100644
index 000000000..1f6cb6dde
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/constants/const.go
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package constants
+
+const (
+ // EnvNodeNameSpec is the environment label used by Kubernetes to
+ // specify the node's name.
+ EnvNodeNameSpec = "K8S_NODE_NAME"
+)
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/identitybackend/identity.go b/vendor/github.com/cilium/cilium/pkg/k8s/identitybackend/identity.go
new file mode 100644
index 000000000..2fd33a186
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/identitybackend/identity.go
@@ -0,0 +1,405 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package identitybackend
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/sirupsen/logrus"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/tools/cache"
+
+ "github.com/cilium/cilium/pkg/allocator"
+ "github.com/cilium/cilium/pkg/idpool"
+ k8sConst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
+ v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
+ clientset "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned"
+ "github.com/cilium/cilium/pkg/k8s/informer"
+ k8sUtils "github.com/cilium/cilium/pkg/k8s/utils"
+ "github.com/cilium/cilium/pkg/kvstore"
+ "github.com/cilium/cilium/pkg/labels"
+ "github.com/cilium/cilium/pkg/logging"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+ "github.com/cilium/cilium/pkg/rate"
+)
+
+var (
+ log = logging.DefaultLogger.WithField(logfields.LogSubsys, "crd-allocator")
+)
+
+const (
+ // HeartBeatAnnotation is an annotation applied by the operator to indicate
+ // that a CiliumIdentity has been marked for deletion.
+ HeartBeatAnnotation = "io.cilium.heartbeat"
+
+ k8sPrefix = labels.LabelSourceK8s + ":"
+ k8sNamespaceLabelPrefix = labels.LabelSourceK8s + ":" + k8sConst.PodNamespaceMetaLabels + labels.PathDelimiter
+
+ // byKeyIndex is the name of the index of the identities by key.
+ byKeyIndex = "by-key-index"
+)
+
+func NewCRDBackend(c CRDBackendConfiguration) (allocator.Backend, error) {
+ return &crdBackend{CRDBackendConfiguration: c}, nil
+}
+
+type CRDBackendConfiguration struct {
+ Store cache.Indexer
+ Client clientset.Interface
+ KeyFunc func(map[string]string) allocator.AllocatorKey
+}
+
+type crdBackend struct {
+ CRDBackendConfiguration
+}
+
+func (c *crdBackend) DeleteAllKeys(ctx context.Context) {
+}
+
+// sanitizeK8sLabels strips the 'k8s:' prefix in the labels generated by
+// AllocatorKey.GetAsMap (when the key is k8s labels). In the CRD identity case
+// we map the labels directly to the ciliumidentity CRD instance, and
+// kubernetes does not allow ':' in the name of the label. These labels are not
+// the canonical labels of the identity, but used to ease interaction with the
+// CRD object.
+func sanitizeK8sLabels(old map[string]string) (selected, skipped map[string]string) {
+ skipped = make(map[string]string, len(old))
+ selected = make(map[string]string, len(old))
+ for k, v := range old {
+ // Skip non-k8s labels.
+ // Skip synthesized labels for k8s namespace labels, since they contain user input which can result in the label
+ // name being longer than 63 characters.
+ if !strings.HasPrefix(k, k8sPrefix) || strings.HasPrefix(k, k8sNamespaceLabelPrefix) {
+ skipped[k] = v
+ continue // skip non-k8s labels
+ }
+ k = strings.TrimPrefix(k, k8sPrefix) // k8s: is redundant
+ selected[k] = v
+ }
+ return selected, skipped
+}
+
+// AllocateID will create an identity CRD, thus creating the identity for this
+// key-> ID mapping.
+// Note: the lock field is not supported with the k8s CRD allocator.
+func (c *crdBackend) AllocateID(ctx context.Context, id idpool.ID, key allocator.AllocatorKey) error {
+ selectedLabels, skippedLabels := sanitizeK8sLabels(key.GetAsMap())
+ log.WithField(logfields.Labels, skippedLabels).Info("Skipped non-kubernetes labels when labelling ciliumidentity. All labels will still be used in identity determination")
+
+ identity := &v2.CiliumIdentity{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: id.String(),
+ Labels: selectedLabels,
+ },
+ SecurityLabels: key.GetAsMap(),
+ }
+
+ _, err := c.Client.CiliumV2().CiliumIdentities().Create(ctx, identity, metav1.CreateOptions{})
+ return err
+}
+
+func (c *crdBackend) AllocateIDIfLocked(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, lock kvstore.KVLocker) error {
+ return c.AllocateID(ctx, id, key)
+}
+
+// AcquireReference acquires a reference to the identity.
+func (c *crdBackend) AcquireReference(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, lock kvstore.KVLocker) error {
+ // For CiliumIdentity-based allocation, the reference counting is
+ // handled via CiliumEndpoint. Any CiliumEndpoint referring to a
+ // CiliumIdentity will keep the CiliumIdentity alive. However,
+ // there is a brief window where a CiliumEndpoint may not exist
+ // for a given CiliumIdentity (according to the operator), in
+ // which case the operator marks the CiliumIdentity for deletion.
+ // This checks to see if the CiliumIdentity has been marked for
+ // deletion and removes the mark so that the CiliumIdentity can
+ // be safely used.
+ //
+ // NOTE: A race against using a CiliumIdentity that might otherwise
+ // be (immediately) deleted is prevented by the operator logic that
+ // validates the ResourceVersion of the CiliumIdentity before deleting
+ // it. If a CiliumIdentity does (eventually) get deleted by the
+ // operator, the agent will then have a chance to recreate it.
+ var (
+ ts string
+ ok bool
+ )
+ // check to see if the cached copy of the identity
+ // has the annotation
+ ci, exists, err := c.getById(ctx, id)
+ if err != nil {
+ return err
+ }
+ if !exists {
+ return fmt.Errorf("identity (id:%q,key:%q) does not exist", id, key)
+ }
+ ci = ci.DeepCopy()
+
+ ts, ok = ci.Annotations[HeartBeatAnnotation]
+ if ok {
+ log.WithField(logfields.Identity, ci).Infof("Identity marked for deletion (at %s); attempting to unmark it", ts)
+ delete(ci.Annotations, HeartBeatAnnotation)
+ _, err = c.Client.CiliumV2().CiliumIdentities().Update(ctx, ci, metav1.UpdateOptions{})
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (c *crdBackend) RunLocksGC(_ context.Context, _ map[string]kvstore.Value) (map[string]kvstore.Value, error) {
+ return nil, nil
+}
+
+func (c *crdBackend) RunGC(context.Context, *rate.Limiter, map[string]uint64, idpool.ID, idpool.ID) (map[string]uint64, *allocator.GCStats, error) {
+ return nil, nil, nil
+}
+
+// UpdateKey refreshes the reference that this node is using this key->ID
+// mapping. It assumes that the identity already exists but will recreate it if
+// reliablyMissing is true.
+// Note: the lock field is not supported with the k8s CRD allocator.
+func (c *crdBackend) UpdateKey(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, reliablyMissing bool) error {
+ err := c.AcquireReference(ctx, id, key, nil)
+ if err == nil {
+ log.WithFields(logrus.Fields{
+ logfields.Identity: id,
+ logfields.Labels: key,
+ }).Debug("Acquired reference for identity")
+ return nil
+ }
+
+ // The CRD (aka the master key) is missing. Try to recover by recreating it
+ // if reliablyMissing is set.
+ log.WithError(err).WithFields(logrus.Fields{
+ logfields.Identity: id,
+ logfields.Labels: key,
+	}).Warning("Unable to update CRD identity information with a reference for this node")
+
+ if reliablyMissing {
+ // Recreate a missing master key
+ if err = c.AllocateID(ctx, id, key); err != nil {
+			return fmt.Errorf("Unable to recreate missing CRD identity %q->%q: %s", key, id, err)
+ }
+ return nil
+ }
+
+ return err
+}
+
+func (c *crdBackend) UpdateKeyIfLocked(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, reliablyMissing bool, lock kvstore.KVLocker) error {
+ return c.UpdateKey(ctx, id, key, reliablyMissing)
+}
+
+// Lock does not return a lock object. Locking is not supported with the k8s
+// CRD allocator. It is here to meet interface requirements.
+func (c *crdBackend) Lock(ctx context.Context, key allocator.AllocatorKey) (kvstore.KVLocker, error) {
+ return &crdLock{}, nil
+}
+
+type crdLock struct{}
+
+// Unlock does not unlock a lock object. Locking is not supported with the k8s
+// CRD allocator. It is here to meet interface requirements.
+func (c *crdLock) Unlock(ctx context.Context) error {
+ return nil
+}
+
+// Comparator does nothing. Locking is not supported with the k8s
+// CRD allocator. It is here to meet interface requirements.
+func (c *crdLock) Comparator() interface{} {
+ return nil
+}
+
+// get returns the identity found for the given set of labels.
+// In the case of duplicate entries, return an identity entry
+// from a sorted list.
+func (c *crdBackend) get(ctx context.Context, key allocator.AllocatorKey) *v2.CiliumIdentity {
+ if c.Store == nil {
+ return nil
+ }
+
+ identities, err := c.Store.ByIndex(byKeyIndex, key.GetKey())
+ if err != nil || len(identities) == 0 {
+ return nil
+ }
+
+ sort.Slice(identities, func(i, j int) bool {
+ left, ok := identities[i].(*v2.CiliumIdentity)
+ if !ok {
+ return false
+ }
+
+ right, ok := identities[j].(*v2.CiliumIdentity)
+ if !ok {
+ return false
+ }
+
+ return left.CreationTimestamp.Before(&right.CreationTimestamp)
+ })
+
+ for _, identityObject := range identities {
+ identity, ok := identityObject.(*v2.CiliumIdentity)
+ if !ok {
+ return nil
+ }
+
+ if reflect.DeepEqual(identity.SecurityLabels, key.GetAsMap()) {
+ return identity
+ }
+ }
+ return nil
+}
+
+// Get returns the first ID which is allocated to a key in the identity CRDs in
+// kubernetes.
+// Note: the lock field is not supported with the k8s CRD allocator.
+func (c *crdBackend) Get(ctx context.Context, key allocator.AllocatorKey) (idpool.ID, error) {
+ identity := c.get(ctx, key)
+ if identity == nil {
+ return idpool.NoID, nil
+ }
+
+ id, err := strconv.ParseUint(identity.Name, 10, 64)
+ if err != nil {
+ return idpool.NoID, fmt.Errorf("unable to parse value '%s': %s", identity.Name, err)
+ }
+
+ return idpool.ID(id), nil
+}
+
+func (c *crdBackend) GetIfLocked(ctx context.Context, key allocator.AllocatorKey, lock kvstore.KVLocker) (idpool.ID, error) {
+ return c.Get(ctx, key)
+}
+
+// getById fetches the identities from the local store. Returns a nil `err` and
+// false `exists` if an Identity is not found for the given `id`.
+func (c *crdBackend) getById(ctx context.Context, id idpool.ID) (idty *v2.CiliumIdentity, exists bool, err error) {
+ if c.Store == nil {
+ return nil, false, fmt.Errorf("store is not available yet")
+ }
+
+ identityTemplate := &v2.CiliumIdentity{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: id.String(),
+ },
+ }
+
+ obj, exists, err := c.Store.Get(identityTemplate)
+ if err != nil {
+ return nil, exists, err
+ }
+ if !exists {
+ return nil, exists, nil
+ }
+
+ identity, ok := obj.(*v2.CiliumIdentity)
+ if !ok {
+ return nil, false, fmt.Errorf("invalid object")
+ }
+ return identity, true, nil
+}
+
+// GetByID returns the key associated with an ID. Returns nil if no key is
+// associated with the ID.
+// Note: the lock field is not supported with the k8s CRD allocator.
+func (c *crdBackend) GetByID(ctx context.Context, id idpool.ID) (allocator.AllocatorKey, error) {
+ identity, exists, err := c.getById(ctx, id)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, nil
+ }
+
+ return c.KeyFunc(identity.SecurityLabels), nil
+}
+
+// Release dissociates this node from using the identity bound to the given ID.
+// When an identity has no references it may be garbage collected.
+func (c *crdBackend) Release(ctx context.Context, id idpool.ID, key allocator.AllocatorKey) (err error) {
+ // For CiliumIdentity-based allocation, the reference counting is
+ // handled via CiliumEndpoint. Any CiliumEndpoint referring to a
+ // CiliumIdentity will keep the CiliumIdentity alive. No action is
+ // needed to release the reference here.
+ return nil
+}
+
+func getIdentitiesByKeyFunc(keyFunc func(map[string]string) allocator.AllocatorKey) func(obj interface{}) ([]string, error) {
+ return func(obj interface{}) ([]string, error) {
+ if identity, ok := obj.(*v2.CiliumIdentity); ok {
+ return []string{keyFunc(identity.SecurityLabels).GetKey()}, nil
+ }
+ return []string{}, fmt.Errorf("object other than CiliumIdentity was pushed to the store")
+ }
+}
+
+func (c *crdBackend) ListAndWatch(ctx context.Context, handler allocator.CacheMutations, stopChan chan struct{}) {
+ c.Store = cache.NewIndexer(
+ cache.DeletionHandlingMetaNamespaceKeyFunc,
+ cache.Indexers{byKeyIndex: getIdentitiesByKeyFunc(c.KeyFunc)})
+ identityInformer := informer.NewInformerWithStore(
+ k8sUtils.ListerWatcherFromTyped[*v2.CiliumIdentityList](c.Client.CiliumV2().CiliumIdentities()),
+ &v2.CiliumIdentity{},
+ 0,
+ cache.ResourceEventHandlerFuncs{
+ AddFunc: func(obj interface{}) {
+ if identity, ok := obj.(*v2.CiliumIdentity); ok {
+ if id, err := strconv.ParseUint(identity.Name, 10, 64); err == nil {
+ handler.OnAdd(idpool.ID(id), c.KeyFunc(identity.SecurityLabels))
+ }
+ }
+ },
+ UpdateFunc: func(oldObj, newObj interface{}) {
+ if oldIdentity, ok := oldObj.(*v2.CiliumIdentity); ok {
+ if newIdentity, ok := newObj.(*v2.CiliumIdentity); ok {
+ if oldIdentity.DeepEqual(newIdentity) {
+ return
+ }
+ if id, err := strconv.ParseUint(newIdentity.Name, 10, 64); err == nil {
+ handler.OnModify(idpool.ID(id), c.KeyFunc(newIdentity.SecurityLabels))
+ }
+ }
+ }
+ },
+ DeleteFunc: func(obj interface{}) {
+ // The delete event is sometimes for items with unknown state that are
+ // deleted anyway.
+ if deleteObj, isDeleteObj := obj.(cache.DeletedFinalStateUnknown); isDeleteObj {
+ obj = deleteObj.Obj
+ }
+
+ if identity, ok := obj.(*v2.CiliumIdentity); ok {
+ if id, err := strconv.ParseUint(identity.Name, 10, 64); err == nil {
+ handler.OnDelete(idpool.ID(id), c.KeyFunc(identity.SecurityLabels))
+ }
+ } else {
+ log.Debugf("Ignoring unknown delete event %#v", obj)
+ }
+ },
+ },
+ nil,
+ c.Store,
+ )
+
+ go func() {
+ if ok := cache.WaitForCacheSync(stopChan, identityInformer.HasSynced); ok {
+ handler.OnListDone()
+ }
+ }()
+
+ identityInformer.Run(stopChan)
+}
+
+func (c *crdBackend) Status() (string, error) {
+ return "OK", nil
+}
+
+func (c *crdBackend) Encode(v string) string {
+ return v
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/informer/informer.go b/vendor/github.com/cilium/cilium/pkg/k8s/informer/informer.go
new file mode 100644
index 000000000..8bfaee32a
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/informer/informer.go
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package informer
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "time"
+
+ k8sRuntime "k8s.io/apimachinery/pkg/runtime"
+ utilRuntime "k8s.io/apimachinery/pkg/util/runtime"
+ "k8s.io/client-go/tools/cache"
+
+ "github.com/cilium/cilium/pkg/logging"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+)
+
+var log = logging.DefaultLogger.WithField(logfields.LogSubsys, "k8s")
+
+func init() {
+ utilRuntime.PanicHandlers = append(
+ utilRuntime.PanicHandlers,
+ func(r interface{}) {
+ // from k8s library
+ if err, ok := r.(error); ok && errors.Is(err, http.ErrAbortHandler) {
+ // honor the http.ErrAbortHandler sentinel panic value:
+ // ErrAbortHandler is a sentinel panic value to abort a handler.
+ // While any panic from ServeHTTP aborts the response to the client,
+ // panicking with ErrAbortHandler also suppresses logging of a stack trace to the server's error log.
+ return
+ }
+ log.Fatal("Panic in Kubernetes runtime handler")
+ },
+ )
+}
+
+// NewInformer is a copy of k8s.io/client-go/tools/cache/NewInformer includes the default cache MutationDetector.
+func NewInformer(
+ lw cache.ListerWatcher,
+ objType k8sRuntime.Object,
+ resyncPeriod time.Duration,
+ h cache.ResourceEventHandler,
+ transformer cache.TransformFunc,
+) (cache.Store, cache.Controller) {
+ // This will hold the client state, as we know it.
+ clientState := cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc)
+
+ return clientState, NewInformerWithStore(lw, objType, resyncPeriod, h, transformer, clientState)
+}
+
+// NewIndexerInformer is a copy of k8s.io/client-go/tools/cache/NewIndexerInformer but includes the
+// default cache MutationDetector.
+func NewIndexerInformer(
+ lw cache.ListerWatcher,
+ objType k8sRuntime.Object,
+ resyncPeriod time.Duration,
+ h cache.ResourceEventHandler,
+ transformer cache.TransformFunc,
+ indexers cache.Indexers,
+) (cache.Indexer, cache.Controller) {
+ clientState := cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, indexers)
+ return clientState, NewInformerWithStore(lw, objType, resyncPeriod, h, transformer, clientState)
+}
+
+// NewInformerWithStore uses the same arguments as NewInformer for which a caller can also set a
+// cache.Store and includes the default cache MutationDetector.
+func NewInformerWithStore(
+ lw cache.ListerWatcher,
+ objType k8sRuntime.Object,
+ resyncPeriod time.Duration,
+ h cache.ResourceEventHandler,
+ transformer cache.TransformFunc,
+ clientState cache.Store,
+) cache.Controller {
+
+ // This will hold incoming changes. Note how we pass clientState in as a
+ // KeyLister, that way resync operations will result in the correct set
+ // of update/delete deltas.
+ opts := cache.DeltaFIFOOptions{KeyFunction: cache.MetaNamespaceKeyFunc, KnownObjects: clientState}
+ fifo := cache.NewDeltaFIFOWithOptions(opts)
+
+ cacheMutationDetector := cache.NewCacheMutationDetector(fmt.Sprintf("%T", objType))
+
+ cfg := &cache.Config{
+ Queue: fifo,
+ ListerWatcher: lw,
+ ObjectType: objType,
+ FullResyncPeriod: resyncPeriod,
+ RetryOnError: false,
+
+ Process: func(obj interface{}, isInInitialList bool) error {
+ // from oldest to newest
+ for _, d := range obj.(cache.Deltas) {
+
+ var obj interface{}
+ if transformer != nil {
+ var err error
+ if obj, err = transformer(d.Object); err != nil {
+ return err
+ }
+ } else {
+ obj = d.Object
+ }
+
+ // In CI we detect if the objects were modified and panic
+ // this is a no-op in production environments.
+ cacheMutationDetector.AddObject(obj)
+
+ switch d.Type {
+ case cache.Sync, cache.Added, cache.Updated:
+ if old, exists, err := clientState.Get(obj); err == nil && exists {
+ if err := clientState.Update(obj); err != nil {
+ return err
+ }
+ h.OnUpdate(old, obj)
+ } else {
+ if err := clientState.Add(obj); err != nil {
+ return err
+ }
+ h.OnAdd(obj, isInInitialList)
+ }
+ case cache.Deleted:
+ if err := clientState.Delete(obj); err != nil {
+ return err
+ }
+ h.OnDelete(obj)
+ }
+ }
+ return nil
+ },
+ }
+ return cache.New(cfg)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/metrics/metrics.go b/vendor/github.com/cilium/cilium/pkg/k8s/metrics/metrics.go
new file mode 100644
index 000000000..84310b2e6
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/metrics/metrics.go
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package metrics
+
+import (
+ "time"
+
+ "github.com/cilium/cilium/pkg/lock"
+)
+
+var (
+ // LastInteraction is the time at which the last apiserver interaction
+ // occurred
+ LastInteraction eventTimestamper
+ // LastSuccessInteraction is the time at which we have received a successful
+ // k8s apiserver reply (i.e. a response code 2xx or 4xx).
+ LastSuccessInteraction eventTimestamper
+)
+
+type eventTimestamper struct {
+ timestamp time.Time
+ lock lock.RWMutex
+}
+
+// Reset sets the timestamp to the current time
+func (e *eventTimestamper) Reset() {
+ e.lock.Lock()
+ e.timestamp = time.Now()
+ e.lock.Unlock()
+}
+
+// Time returns the timestamp as set per Reset()
+func (e *eventTimestamper) Time() time.Time {
+ e.lock.RLock()
+ t := e.timestamp
+ e.lock.RUnlock()
+ return t
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.proto b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.proto
index 719483287..d45e97cb6 100644
--- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.proto
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.proto
@@ -418,10 +418,7 @@ message PodCondition {
optional string message = 6;
}
-// IP address information for entries in the (plural) PodIPs field.
-// Each entry includes:
-//
-// IP: An IP address allocated to the pod. Routable at least within the cluster.
+// PodIP represents a single IP address allocated to the pod.
message PodIP {
// ip is an IP address (IPv4 or IPv6) assigned to the pod
optional string ip = 1;
@@ -517,11 +514,13 @@ message PodStatus {
// +patchStrategy=merge
repeated PodCondition conditions = 2;
- // IP address of the host to which the pod is assigned. Empty if not yet scheduled.
+ // hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet.
+ // A pod can be assigned to a node that has a problem in kubelet which in turns mean that HostIP will
+ // not be updated even if there is a node is assigned to pod
// +optional
optional string hostIP = 5;
- // IP address allocated to the pod. Routable at least within the cluster.
+ // podIP address allocated to the pod. Routable at least within the cluster.
// Empty if not yet allocated.
// +optional
optional string podIP = 6;
@@ -792,10 +791,9 @@ message ServiceSpec {
// This feature depends on whether the underlying cloud-provider supports specifying
// the loadBalancerIP when a load balancer is created.
// This field will be ignored if the cloud-provider does not support the feature.
- // Deprecated: This field was under-specified and its meaning varies across implementations,
- // and it cannot support dual-stack.
- // As of Kubernetes v1.24, users are encouraged to use implementation-specific annotations when available.
- // This field may be removed in a future API version.
+ // Deprecated: This field was under-specified and its meaning varies across implementations.
+ // Using it is non-portable and it may not support dual-stack.
+ // Users are encouraged to use implementation-specific annotations when available.
// +optional
optional string loadBalancerIP = 8;
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/types.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/types.go
index a4e6d6918..0e2113581 100644
--- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/types.go
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/types.go
@@ -309,10 +309,7 @@ type PodSpec struct {
HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,11,opt,name=hostNetwork"`
}
-// IP address information for entries in the (plural) PodIPs field.
-// Each entry includes:
-//
-// IP: An IP address allocated to the pod. Routable at least within the cluster.
+// PodIP represents a single IP address allocated to the pod.
type PodIP struct {
// ip is an IP address (IPv4 or IPv6) assigned to the pod
IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
@@ -348,10 +345,13 @@ type PodStatus struct {
// +patchMergeKey=type
// +patchStrategy=merge
Conditions []PodCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
- // IP address of the host to which the pod is assigned. Empty if not yet scheduled.
+ // hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet.
+ // A pod can be assigned to a node that has a problem in kubelet which in turns mean that HostIP will
+ // not be updated even if there is a node is assigned to pod
// +optional
HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"`
- // IP address allocated to the pod. Routable at least within the cluster.
+
+ // podIP address allocated to the pod. Routable at least within the cluster.
// Empty if not yet allocated.
// +optional
PodIP string `json:"podIP,omitempty" protobuf:"bytes,6,opt,name=podIP"`
@@ -712,10 +712,9 @@ type ServiceSpec struct {
// This feature depends on whether the underlying cloud-provider supports specifying
// the loadBalancerIP when a load balancer is created.
// This field will be ignored if the cloud-provider does not support the feature.
- // Deprecated: This field was under-specified and its meaning varies across implementations,
- // and it cannot support dual-stack.
- // As of Kubernetes v1.24, users are encouraged to use implementation-specific annotations when available.
- // This field may be removed in a future API version.
+ // Deprecated: This field was under-specified and its meaning varies across implementations.
+ // Using it is non-portable and it may not support dual-stack.
+ // Users are encouraged to use implementation-specific annotations when available.
// +optional
LoadBalancerIP string `json:"loadBalancerIP,omitempty" protobuf:"bytes,8,opt,name=loadBalancerIP"`
@@ -1048,7 +1047,6 @@ const (
NodeTerminated NodePhase = "Terminated"
)
-// +enum
type NodeConditionType string
// These are valid but not exhaustive conditions of node. A cloud provider may set a condition not listed here.
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/doc.go
new file mode 100644
index 000000000..6474070a3
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/doc.go
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// +k8s:deepcopy-gen=package
+// +k8s:protobuf-gen=package
+// +k8s:openapi-gen=true
+// +deepequal-gen=package
+// +groupName=discovery.k8s.io
+
+// Package v1 contains slimmer versions of k8s discovery types.
+package v1
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/generated.pb.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/generated.pb.go
new file mode 100644
index 000000000..13b934cc8
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/generated.pb.go
@@ -0,0 +1,2092 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/generated.proto
+
+package v1
+
+import (
+ fmt "fmt"
+
+ github_com_cilium_cilium_pkg_k8s_slim_k8s_api_core_v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
+
+ io "io"
+
+ proto "github.com/gogo/protobuf/proto"
+ github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+
+ math "math"
+ math_bits "math/bits"
+ reflect "reflect"
+ strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *Endpoint) Reset() { *m = Endpoint{} }
+func (*Endpoint) ProtoMessage() {}
+func (*Endpoint) Descriptor() ([]byte, []int) {
+ return fileDescriptor_824daf76e2aebd1d, []int{0}
+}
+func (m *Endpoint) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Endpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Endpoint) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Endpoint.Merge(m, src)
+}
+func (m *Endpoint) XXX_Size() int {
+ return m.Size()
+}
+func (m *Endpoint) XXX_DiscardUnknown() {
+ xxx_messageInfo_Endpoint.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Endpoint proto.InternalMessageInfo
+
+func (m *EndpointConditions) Reset() { *m = EndpointConditions{} }
+func (*EndpointConditions) ProtoMessage() {}
+func (*EndpointConditions) Descriptor() ([]byte, []int) {
+ return fileDescriptor_824daf76e2aebd1d, []int{1}
+}
+func (m *EndpointConditions) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *EndpointConditions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *EndpointConditions) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EndpointConditions.Merge(m, src)
+}
+func (m *EndpointConditions) XXX_Size() int {
+ return m.Size()
+}
+func (m *EndpointConditions) XXX_DiscardUnknown() {
+ xxx_messageInfo_EndpointConditions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EndpointConditions proto.InternalMessageInfo
+
+func (m *EndpointHints) Reset() { *m = EndpointHints{} }
+func (*EndpointHints) ProtoMessage() {}
+func (*EndpointHints) Descriptor() ([]byte, []int) {
+ return fileDescriptor_824daf76e2aebd1d, []int{2}
+}
+func (m *EndpointHints) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *EndpointHints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *EndpointHints) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EndpointHints.Merge(m, src)
+}
+func (m *EndpointHints) XXX_Size() int {
+ return m.Size()
+}
+func (m *EndpointHints) XXX_DiscardUnknown() {
+ xxx_messageInfo_EndpointHints.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EndpointHints proto.InternalMessageInfo
+
+func (m *EndpointPort) Reset() { *m = EndpointPort{} }
+func (*EndpointPort) ProtoMessage() {}
+func (*EndpointPort) Descriptor() ([]byte, []int) {
+ return fileDescriptor_824daf76e2aebd1d, []int{3}
+}
+func (m *EndpointPort) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *EndpointPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *EndpointPort) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EndpointPort.Merge(m, src)
+}
+func (m *EndpointPort) XXX_Size() int {
+ return m.Size()
+}
+func (m *EndpointPort) XXX_DiscardUnknown() {
+ xxx_messageInfo_EndpointPort.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EndpointPort proto.InternalMessageInfo
+
+func (m *EndpointSlice) Reset() { *m = EndpointSlice{} }
+func (*EndpointSlice) ProtoMessage() {}
+func (*EndpointSlice) Descriptor() ([]byte, []int) {
+ return fileDescriptor_824daf76e2aebd1d, []int{4}
+}
+func (m *EndpointSlice) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *EndpointSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *EndpointSlice) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EndpointSlice.Merge(m, src)
+}
+func (m *EndpointSlice) XXX_Size() int {
+ return m.Size()
+}
+func (m *EndpointSlice) XXX_DiscardUnknown() {
+ xxx_messageInfo_EndpointSlice.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EndpointSlice proto.InternalMessageInfo
+
+func (m *EndpointSliceList) Reset() { *m = EndpointSliceList{} }
+func (*EndpointSliceList) ProtoMessage() {}
+func (*EndpointSliceList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_824daf76e2aebd1d, []int{5}
+}
+func (m *EndpointSliceList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *EndpointSliceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *EndpointSliceList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EndpointSliceList.Merge(m, src)
+}
+func (m *EndpointSliceList) XXX_Size() int {
+ return m.Size()
+}
+func (m *EndpointSliceList) XXX_DiscardUnknown() {
+ xxx_messageInfo_EndpointSliceList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EndpointSliceList proto.InternalMessageInfo
+
+func (m *ForZone) Reset() { *m = ForZone{} }
+func (*ForZone) ProtoMessage() {}
+func (*ForZone) Descriptor() ([]byte, []int) {
+ return fileDescriptor_824daf76e2aebd1d, []int{6}
+}
+func (m *ForZone) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ForZone) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ForZone) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ForZone.Merge(m, src)
+}
+func (m *ForZone) XXX_Size() int {
+ return m.Size()
+}
+func (m *ForZone) XXX_DiscardUnknown() {
+ xxx_messageInfo_ForZone.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ForZone proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*Endpoint)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1.Endpoint")
+ proto.RegisterMapType((map[string]string)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1.Endpoint.DeprecatedTopologyEntry")
+ proto.RegisterType((*EndpointConditions)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1.EndpointConditions")
+ proto.RegisterType((*EndpointHints)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1.EndpointHints")
+ proto.RegisterType((*EndpointPort)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1.EndpointPort")
+ proto.RegisterType((*EndpointSlice)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1.EndpointSlice")
+ proto.RegisterType((*EndpointSliceList)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1.EndpointSliceList")
+ proto.RegisterType((*ForZone)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1.ForZone")
+}
+
+func init() {
+ proto.RegisterFile("github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/generated.proto", fileDescriptor_824daf76e2aebd1d)
+}
+
+var fileDescriptor_824daf76e2aebd1d = []byte{
+ // 847 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x4d, 0x6f, 0xe4, 0x34,
+ 0x18, 0x1e, 0xef, 0x34, 0x34, 0xe3, 0xb4, 0x62, 0xd7, 0x42, 0x22, 0xaa, 0x50, 0x32, 0x1a, 0x09,
+ 0x69, 0xa4, 0x15, 0x89, 0xda, 0x03, 0xaa, 0x7a, 0x40, 0x6c, 0xb6, 0x5d, 0xed, 0x22, 0x28, 0x2b,
+ 0x6f, 0x4f, 0x2b, 0x0e, 0xa4, 0x89, 0x37, 0x75, 0x67, 0x62, 0x87, 0xd8, 0x33, 0xd2, 0x70, 0x42,
+ 0xfb, 0x07, 0xe0, 0x67, 0x70, 0xe3, 0xc6, 0x6f, 0xe8, 0x71, 0x8f, 0x7b, 0x8a, 0x68, 0xf8, 0x09,
+ 0xdc, 0x7a, 0x42, 0x76, 0xbe, 0xa6, 0x4c, 0x11, 0xea, 0x30, 0xa7, 0xd8, 0x8f, 0x5f, 0x3f, 0xef,
+ 0xf3, 0x3e, 0xaf, 0x6d, 0x05, 0x7e, 0x95, 0x50, 0x79, 0x31, 0x3b, 0xf7, 0x22, 0x9e, 0xfa, 0x11,
+ 0x9d, 0xd2, 0x59, 0xfb, 0xc9, 0x26, 0x89, 0x3f, 0x39, 0x14, 0xbe, 0x98, 0xd2, 0x54, 0x0f, 0xc2,
+ 0x8c, 0xfa, 0x31, 0x15, 0x11, 0x9f, 0x93, 0x7c, 0xe1, 0xcf, 0xf7, 0xfd, 0x84, 0x30, 0x92, 0x87,
+ 0x92, 0xc4, 0x5e, 0x96, 0x73, 0xc9, 0xd1, 0x51, 0xc7, 0xe5, 0x55, 0x24, 0xcd, 0x27, 0x9b, 0x24,
+ 0xde, 0xe4, 0x50, 0x78, 0x8a, 0x4b, 0x0f, 0xc2, 0x8c, 0x7a, 0x2d, 0x97, 0x37, 0xdf, 0xdf, 0x3b,
+ 0xb9, 0x9f, 0x8e, 0x88, 0xe7, 0xe4, 0x0e, 0x09, 0x7b, 0xcf, 0xee, 0x45, 0x23, 0xfc, 0x94, 0xc8,
+ 0xf0, 0x2e, 0x9e, 0xcf, 0x96, 0x78, 0x12, 0x9e, 0x70, 0x5f, 0xc3, 0xe7, 0xb3, 0x37, 0x7a, 0xa6,
+ 0x27, 0x7a, 0x54, 0x87, 0x2b, 0x42, 0x8f, 0x72, 0xc5, 0x99, 0x86, 0xd1, 0x05, 0x65, 0xca, 0x21,
+ 0x95, 0x31, 0x9f, 0x31, 0x49, 0x53, 0xb2, 0xc2, 0xff, 0xf9, 0x7f, 0x6d, 0x10, 0xd1, 0x05, 0x49,
+ 0xc3, 0x7f, 0xee, 0x1b, 0x95, 0x5b, 0xd0, 0x3c, 0x61, 0x71, 0xc6, 0x29, 0x93, 0xe8, 0x31, 0x1c,
+ 0x84, 0x71, 0x9c, 0x13, 0x21, 0x88, 0xb0, 0xc1, 0xb0, 0x3f, 0x1e, 0x04, 0xbb, 0x65, 0xe1, 0x0e,
+ 0x9e, 0x34, 0x20, 0xee, 0xd6, 0xd1, 0x5b, 0x00, 0x61, 0xc4, 0x59, 0x4c, 0x25, 0xe5, 0x4c, 0xd8,
+ 0x0f, 0x86, 0x60, 0x6c, 0x1d, 0x9c, 0x7a, 0xeb, 0xb7, 0xcc, 0x6b, 0x74, 0x3c, 0x6d, 0x59, 0x03,
+ 0x74, 0x55, 0xb8, 0xbd, 0xb2, 0x70, 0x61, 0x87, 0xe1, 0xa5, 0xac, 0xe8, 0x57, 0x00, 0x51, 0x4c,
+ 0xb2, 0x9c, 0x44, 0xaa, 0xa6, 0x33, 0x9e, 0xf1, 0x29, 0x4f, 0x16, 0xb6, 0x31, 0xec, 0x8f, 0xad,
+ 0x83, 0xef, 0x36, 0x21, 0xc6, 0x3b, 0x5e, 0xa1, 0x3f, 0x61, 0x32, 0x5f, 0x04, 0x7b, 0xb5, 0x34,
+ 0xb4, 0x1a, 0x80, 0xef, 0xd0, 0x84, 0xc6, 0xd0, 0x64, 0x3c, 0x26, 0xa7, 0x61, 0x4a, 0xec, 0x0f,
+ 0x86, 0x60, 0x3c, 0x08, 0x76, 0xca, 0xc2, 0x35, 0x4f, 0x6b, 0x0c, 0xb7, 0xab, 0xe8, 0x13, 0xb8,
+ 0xf5, 0x23, 0x67, 0xc4, 0xde, 0xd6, 0x51, 0x66, 0x59, 0xb8, 0x5b, 0xaf, 0x39, 0x23, 0x58, 0xa3,
+ 0xe8, 0x12, 0x1a, 0x17, 0x94, 0x49, 0x61, 0x9b, 0xda, 0xf1, 0x17, 0x9b, 0x28, 0xf2, 0xb9, 0x22,
+ 0x0c, 0x06, 0x65, 0xe1, 0x1a, 0x7a, 0x88, 0xab, 0x14, 0x7b, 0x27, 0xf0, 0xe3, 0x7f, 0x29, 0x1f,
+ 0x3d, 0x84, 0xfd, 0x09, 0x59, 0xd8, 0x40, 0x69, 0xc4, 0x6a, 0x88, 0x3e, 0x82, 0xc6, 0x3c, 0x9c,
+ 0xce, 0x88, 0x3e, 0x0a, 0x03, 0x5c, 0x4d, 0x8e, 0x1e, 0x1c, 0x82, 0xd1, 0xcf, 0x00, 0xa2, 0xd5,
+ 0xe6, 0x22, 0x17, 0x1a, 0x39, 0x09, 0xe3, 0x8a, 0xc4, 0xac, 0xd2, 0x63, 0x05, 0xe0, 0x0a, 0x47,
+ 0x9f, 0xc2, 0x6d, 0x41, 0xf2, 0x39, 0x65, 0x89, 0xe6, 0x34, 0x03, 0xab, 0x2c, 0xdc, 0xed, 0x57,
+ 0x15, 0x84, 0x9b, 0x35, 0xb4, 0x0f, 0x2d, 0x49, 0xf2, 0x94, 0xb2, 0x50, 0xaa, 0xd0, 0xbe, 0x0e,
+ 0xfd, 0xb0, 0x2c, 0x5c, 0xeb, 0xac, 0x83, 0xf1, 0x72, 0xcc, 0xe8, 0x2d, 0x80, 0xbb, 0xb7, 0x8a,
+ 0x47, 0x3f, 0x40, 0xf3, 0x0d, 0xcf, 0x95, 0xcf, 0xd5, 0xd1, 0xb7, 0x0e, 0x9e, 0xfe, 0x1f, 0x67,
+ 0x9f, 0x55, 0x5c, 0xc1, 0xc3, 0xfa, 0x94, 0x98, 0x35, 0x20, 0x70, 0x9b, 0x66, 0xf4, 0x3b, 0x80,
+ 0x3b, 0x8d, 0x88, 0x97, 0x3c, 0x97, 0xaa, 0xf1, 0x4c, 0x1d, 0x0f, 0xd0, 0x35, 0x5e, 0x1f, 0x0d,
+ 0x8d, 0xa2, 0x4b, 0x68, 0xea, 0x3b, 0x1b, 0xf1, 0x69, 0x65, 0x71, 0x70, 0xaa, 0x88, 0x5f, 0xd6,
+ 0xd8, 0x4d, 0xe1, 0x7e, 0xb1, 0xd6, 0x9b, 0xe7, 0x35, 0x0c, 0xb8, 0xe5, 0x57, 0x4a, 0x32, 0x9e,
+ 0x4b, 0xed, 0xa5, 0x51, 0x29, 0x51, 0x0a, 0xb1, 0x46, 0x47, 0xbf, 0xf5, 0x3b, 0xf7, 0x5e, 0x4d,
+ 0x69, 0x44, 0x50, 0x0e, 0x4d, 0xf5, 0xf2, 0xc5, 0xa1, 0x0c, 0xb5, 0x7a, 0xeb, 0x20, 0xb8, 0x9f,
+ 0x7b, 0xc2, 0x53, 0xfb, 0x95, 0x73, 0xdf, 0x9e, 0x5f, 0x92, 0x48, 0x7e, 0x43, 0x64, 0xd8, 0xdd,
+ 0xfe, 0x0e, 0xc3, 0x6d, 0x1e, 0x74, 0x0c, 0xad, 0xfa, 0x35, 0x3a, 0x5b, 0x64, 0xc4, 0xde, 0xd2,
+ 0x96, 0x8c, 0xea, 0x2d, 0xd6, 0x93, 0x6e, 0xe9, 0xe6, 0xf6, 0x14, 0x2f, 0x6f, 0x43, 0x33, 0x38,
+ 0x20, 0x75, 0x29, 0xea, 0x11, 0x53, 0x8d, 0x3f, 0xde, 0xc4, 0x95, 0x0a, 0x1e, 0xd5, 0x4a, 0x06,
+ 0x0d, 0x22, 0x70, 0x97, 0x09, 0xa5, 0xd0, 0x50, 0x56, 0x0a, 0xbb, 0xaf, 0x53, 0x3e, 0xdf, 0x44,
+ 0x4a, 0xd5, 0xa1, 0x60, 0xb7, 0x4e, 0x6b, 0xa8, 0x99, 0xc0, 0x55, 0x96, 0xd1, 0x5f, 0x00, 0x3e,
+ 0xba, 0xd5, 0xb1, 0xaf, 0xa9, 0x90, 0x88, 0xad, 0x74, 0xed, 0xcb, 0x75, 0xbb, 0xa6, 0xf8, 0x74,
+ 0xcf, 0xda, 0x03, 0xdf, 0x20, 0x4b, 0x1d, 0x63, 0xd0, 0xa0, 0x92, 0xa4, 0x8d, 0xcf, 0x1b, 0x79,
+ 0xba, 0x74, 0x35, 0x5d, 0xd5, 0x2f, 0x14, 0x3f, 0xae, 0xd2, 0x8c, 0x1e, 0xc3, 0xed, 0xfa, 0xda,
+ 0xa1, 0xe1, 0xad, 0xab, 0xb5, 0x53, 0x87, 0x2f, 0x5d, 0xaf, 0xe0, 0xfb, 0xab, 0x6b, 0xa7, 0xf7,
+ 0xee, 0xda, 0xe9, 0xbd, 0xbf, 0x76, 0x7a, 0x3f, 0x95, 0x0e, 0xb8, 0x2a, 0x1d, 0xf0, 0xae, 0x74,
+ 0xc0, 0xfb, 0xd2, 0x01, 0x7f, 0x94, 0x0e, 0xf8, 0xe5, 0x4f, 0xa7, 0xf7, 0xfa, 0x68, 0xfd, 0xdf,
+ 0x9b, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x58, 0x2a, 0x4a, 0x0d, 0x1b, 0x09, 0x00, 0x00,
+}
+
+func (m *Endpoint) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Endpoint) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Endpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Hints != nil {
+ {
+ size, err := m.Hints.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x42
+ }
+ if m.Zone != nil {
+ i -= len(*m.Zone)
+ copy(dAtA[i:], *m.Zone)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Zone)))
+ i--
+ dAtA[i] = 0x3a
+ }
+ if m.NodeName != nil {
+ i -= len(*m.NodeName)
+ copy(dAtA[i:], *m.NodeName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName)))
+ i--
+ dAtA[i] = 0x32
+ }
+ if len(m.DeprecatedTopology) > 0 {
+ keysForDeprecatedTopology := make([]string, 0, len(m.DeprecatedTopology))
+ for k := range m.DeprecatedTopology {
+ keysForDeprecatedTopology = append(keysForDeprecatedTopology, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForDeprecatedTopology)
+ for iNdEx := len(keysForDeprecatedTopology) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.DeprecatedTopology[string(keysForDeprecatedTopology[iNdEx])]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForDeprecatedTopology[iNdEx])
+ copy(dAtA[i:], keysForDeprecatedTopology[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDeprecatedTopology[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x2a
+ }
+ }
+ {
+ size, err := m.Conditions.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ if len(m.Addresses) > 0 {
+ for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Addresses[iNdEx])
+ copy(dAtA[i:], m.Addresses[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Addresses[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *EndpointConditions) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EndpointConditions) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EndpointConditions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Terminating != nil {
+ i--
+ if *m.Terminating {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.Serving != nil {
+ i--
+ if *m.Serving {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Ready != nil {
+ i--
+ if *m.Ready {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *EndpointHints) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EndpointHints) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EndpointHints) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.ForZones) > 0 {
+ for iNdEx := len(m.ForZones) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.ForZones[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *EndpointPort) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EndpointPort) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EndpointPort) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Port != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.Port))
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.Protocol != nil {
+ i -= len(*m.Protocol)
+ copy(dAtA[i:], *m.Protocol)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Name != nil {
+ i -= len(*m.Name)
+ copy(dAtA[i:], *m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *EndpointSlice) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EndpointSlice) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EndpointSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.AddressType)
+ copy(dAtA[i:], m.AddressType)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.AddressType)))
+ i--
+ dAtA[i] = 0x22
+ if len(m.Ports) > 0 {
+ for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.Endpoints) > 0 {
+ for iNdEx := len(m.Endpoints) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Endpoints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *EndpointSliceList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EndpointSliceList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EndpointSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ForZone) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ForZone) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ForZone) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *Endpoint) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Addresses) > 0 {
+ for _, s := range m.Addresses {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = m.Conditions.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.DeprecatedTopology) > 0 {
+ for k, v := range m.DeprecatedTopology {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if m.NodeName != nil {
+ l = len(*m.NodeName)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Zone != nil {
+ l = len(*m.Zone)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Hints != nil {
+ l = m.Hints.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *EndpointConditions) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Ready != nil {
+ n += 2
+ }
+ if m.Serving != nil {
+ n += 2
+ }
+ if m.Terminating != nil {
+ n += 2
+ }
+ return n
+}
+
+func (m *EndpointHints) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.ForZones) > 0 {
+ for _, e := range m.ForZones {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *EndpointPort) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Name != nil {
+ l = len(*m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Protocol != nil {
+ l = len(*m.Protocol)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Port != nil {
+ n += 1 + sovGenerated(uint64(*m.Port))
+ }
+ return n
+}
+
+func (m *EndpointSlice) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Endpoints) > 0 {
+ for _, e := range m.Endpoints {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Ports) > 0 {
+ for _, e := range m.Ports {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.AddressType)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *EndpointSliceList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ForZone) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *Endpoint) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForDeprecatedTopology := make([]string, 0, len(this.DeprecatedTopology))
+ for k := range this.DeprecatedTopology {
+ keysForDeprecatedTopology = append(keysForDeprecatedTopology, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForDeprecatedTopology)
+ mapStringForDeprecatedTopology := "map[string]string{"
+ for _, k := range keysForDeprecatedTopology {
+ mapStringForDeprecatedTopology += fmt.Sprintf("%v: %v,", k, this.DeprecatedTopology[k])
+ }
+ mapStringForDeprecatedTopology += "}"
+ s := strings.Join([]string{`&Endpoint{`,
+ `Addresses:` + fmt.Sprintf("%v", this.Addresses) + `,`,
+ `Conditions:` + strings.Replace(strings.Replace(this.Conditions.String(), "EndpointConditions", "EndpointConditions", 1), `&`, ``, 1) + `,`,
+ `DeprecatedTopology:` + mapStringForDeprecatedTopology + `,`,
+ `NodeName:` + valueToStringGenerated(this.NodeName) + `,`,
+ `Zone:` + valueToStringGenerated(this.Zone) + `,`,
+ `Hints:` + strings.Replace(this.Hints.String(), "EndpointHints", "EndpointHints", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *EndpointConditions) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&EndpointConditions{`,
+ `Ready:` + valueToStringGenerated(this.Ready) + `,`,
+ `Serving:` + valueToStringGenerated(this.Serving) + `,`,
+ `Terminating:` + valueToStringGenerated(this.Terminating) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *EndpointHints) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForForZones := "[]ForZone{"
+ for _, f := range this.ForZones {
+ repeatedStringForForZones += strings.Replace(strings.Replace(f.String(), "ForZone", "ForZone", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForForZones += "}"
+ s := strings.Join([]string{`&EndpointHints{`,
+ `ForZones:` + repeatedStringForForZones + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *EndpointPort) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&EndpointPort{`,
+ `Name:` + valueToStringGenerated(this.Name) + `,`,
+ `Protocol:` + valueToStringGenerated(this.Protocol) + `,`,
+ `Port:` + valueToStringGenerated(this.Port) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *EndpointSlice) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForEndpoints := "[]Endpoint{"
+ for _, f := range this.Endpoints {
+ repeatedStringForEndpoints += strings.Replace(strings.Replace(f.String(), "Endpoint", "Endpoint", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForEndpoints += "}"
+ repeatedStringForPorts := "[]EndpointPort{"
+ for _, f := range this.Ports {
+ repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "EndpointPort", "EndpointPort", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForPorts += "}"
+ s := strings.Join([]string{`&EndpointSlice{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Endpoints:` + repeatedStringForEndpoints + `,`,
+ `Ports:` + repeatedStringForPorts + `,`,
+ `AddressType:` + fmt.Sprintf("%v", this.AddressType) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *EndpointSliceList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]EndpointSlice{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "EndpointSlice", "EndpointSlice", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&EndpointSliceList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ForZone) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ForZone{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringGenerated(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *Endpoint) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Endpoint: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Endpoint: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Addresses = append(m.Addresses, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Conditions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedTopology", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.DeprecatedTopology == nil {
+ m.DeprecatedTopology = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.DeprecatedTopology[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.NodeName = &s
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Zone", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Zone = &s
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Hints", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Hints == nil {
+ m.Hints = &EndpointHints{}
+ }
+ if err := m.Hints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EndpointConditions) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EndpointConditions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EndpointConditions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ready", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.Ready = &b
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Serving", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.Serving = &b
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Terminating", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.Terminating = &b
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EndpointHints) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EndpointHints: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EndpointHints: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ForZones", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ForZones = append(m.ForZones, ForZone{})
+ if err := m.ForZones[len(m.ForZones)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EndpointPort) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Name = &s
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := github_com_cilium_cilium_pkg_k8s_slim_k8s_api_core_v1.Protocol(dAtA[iNdEx:postIndex])
+ m.Protocol = &s
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Port = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EndpointSlice) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EndpointSlice: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EndpointSlice: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Endpoints", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Endpoints = append(m.Endpoints, Endpoint{})
+ if err := m.Endpoints[len(m.Endpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Ports = append(m.Ports, EndpointPort{})
+ if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AddressType", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.AddressType = AddressType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EndpointSliceList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EndpointSliceList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EndpointSliceList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, EndpointSlice{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ForZone) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ForZone: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ForZone: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenerated
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/generated.proto b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/generated.proto
new file mode 100644
index 000000000..47b3998f8
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/generated.proto
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = "proto2";
+
+package github.com.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1;
+
+import "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.proto";
+import "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1";
+
+// Endpoint represents a single logical "backend" implementing a service.
+message Endpoint {
+ // addresses of this endpoint. The contents of this field are interpreted
+ // according to the corresponding EndpointSlice addressType field. Consumers
+ // must handle different types of addresses in the context of their own
+ // capabilities. This must contain at least one address but no more than
+ // 100. These are all assumed to be fungible and clients may choose to only
+ // use the first element. Refer to: https://issue.k8s.io/106267
+ // +listType=set
+ repeated string addresses = 1;
+
+ // conditions contains information about the current status of the endpoint.
+ optional EndpointConditions conditions = 2;
+
+ // deprecatedTopology contains topology information part of the v1beta1
+ // API. This field is deprecated, and will be removed when the v1beta1
+ // API is removed (no sooner than kubernetes v1.24). While this field can
+ // hold values, it is not writable through the v1 API, and any attempts to
+ // write to it will be silently ignored. Topology information can be found
+ // in the zone and nodeName fields instead.
+ // +optional
+ map deprecatedTopology = 5;
+
+ // nodeName represents the name of the Node hosting this endpoint. This can
+ // be used to determine endpoints local to a Node.
+ // +optional
+ optional string nodeName = 6;
+
+ // zone is the name of the Zone this endpoint exists in.
+ // +optional
+ optional string zone = 7;
+
+ // hints contains information associated with how an endpoint should be
+ // consumed.
+ // +optional
+ optional EndpointHints hints = 8;
+}
+
+// EndpointConditions represents the current condition of an endpoint.
+message EndpointConditions {
+ // ready indicates that this endpoint is prepared to receive traffic,
+ // according to whatever system is managing the endpoint. A nil value
+ // indicates an unknown state. In most cases consumers should interpret this
+ // unknown state as ready. For compatibility reasons, ready should never be
+ // "true" for terminating endpoints, except when the normal readiness
+ // behavior is being explicitly overridden, for example when the associated
+ // Service has set the publishNotReadyAddresses flag.
+ // +optional
+ optional bool ready = 1;
+
+ // serving is identical to ready except that it is set regardless of the
+ // terminating state of endpoints. This condition should be set to true for
+ // a ready endpoint that is terminating. If nil, consumers should defer to
+ // the ready condition.
+ // +optional
+ optional bool serving = 2;
+
+ // terminating indicates that this endpoint is terminating. A nil value
+ // indicates an unknown state. Consumers should interpret this unknown state
+ // to mean that the endpoint is not terminating.
+ // +optional
+ optional bool terminating = 3;
+}
+
+// EndpointHints provides hints describing how an endpoint should be consumed.
+message EndpointHints {
+ // forZones indicates the zone(s) this endpoint should be consumed by to
+ // enable topology aware routing.
+ // +listType=atomic
+ repeated ForZone forZones = 1;
+}
+
+// EndpointPort represents a Port used by an EndpointSlice
+// +structType=atomic
+message EndpointPort {
+ // name represents the name of this port. All ports in an EndpointSlice must have a unique name.
+ // If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name.
+ // Name must either be an empty string or pass DNS_LABEL validation:
+ // * must be no more than 63 characters long.
+ // * must consist of lower case alphanumeric characters or '-'.
+ // * must start and end with an alphanumeric character.
+ // Default is empty string.
+ optional string name = 1;
+
+ // protocol represents the IP protocol for this port.
+ // Must be UDP, TCP, or SCTP.
+ // Default is TCP.
+ optional string protocol = 2;
+
+ // port represents the port number of the endpoint.
+ // If this is not specified, ports are not restricted and must be
+ // interpreted in the context of the specific consumer.
+ optional int32 port = 3;
+}
+
+// EndpointSlice represents a subset of the endpoints that implement a service.
+// For a given service there may be multiple EndpointSlice objects, selected by
+// labels, which must be joined to produce the full set of endpoints.
+message EndpointSlice {
+ // Standard object's metadata.
+ // +optional
+ optional github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // addressType specifies the type of address carried by this EndpointSlice.
+ // All addresses in this slice must be the same type. This field is
+ // immutable after creation. The following address types are currently
+ // supported:
+ // * IPv4: Represents an IPv4 Address.
+ // * IPv6: Represents an IPv6 Address.
+ // * FQDN: Represents a Fully Qualified Domain Name.
+ optional string addressType = 4;
+
+ // endpoints is a list of unique endpoints in this slice. Each slice may
+ // include a maximum of 1000 endpoints.
+ // +listType=atomic
+ repeated Endpoint endpoints = 2;
+
+ // ports specifies the list of network ports exposed by each endpoint in
+ // this slice. Each port must have a unique name. When ports is empty, it
+ // indicates that there are no defined ports. When a port is defined with a
+ // nil port value, it indicates "all ports". Each slice may include a
+ // maximum of 100 ports.
+ // +optional
+ // +listType=atomic
+ repeated EndpointPort ports = 3;
+}
+
+// EndpointSliceList represents a list of endpoint slices
+message EndpointSliceList {
+ // Standard list metadata.
+ // +optional
+ optional github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ListMeta metadata = 1;
+
+ // items is the list of endpoint slices
+ repeated EndpointSlice items = 2;
+}
+
+// ForZone provides information about which zones should consume this endpoint.
+message ForZone {
+ // name represents the name of the zone.
+ optional string name = 1;
+}
+
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/register.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/register.go
new file mode 100644
index 000000000..228e1cacf
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/register.go
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Copyright 2019 The Kubernetes Authors.
+
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "discovery.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+
+// Kind takes an unqualified kind and returns a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ // SchemeBuilder is the scheme builder with scheme init functions to run for this API package
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme
+ AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &EndpointSlice{},
+ &EndpointSliceList{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/types.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/types.go
new file mode 100644
index 000000000..b0cea5798
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/types.go
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Copyright 2019 The Kubernetes Authors.
+
+package v1
+
+import (
+ slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
+ slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// EndpointSlice represents a subset of the endpoints that implement a service.
+// For a given service there may be multiple EndpointSlice objects, selected by
+// labels, which must be joined to produce the full set of endpoints.
+type EndpointSlice struct {
+ slim_metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // +optional
+ slim_metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // addressType specifies the type of address carried by this EndpointSlice.
+ // All addresses in this slice must be the same type. This field is
+ // immutable after creation. The following address types are currently
+ // supported:
+ // * IPv4: Represents an IPv4 Address.
+ // * IPv6: Represents an IPv6 Address.
+ // * FQDN: Represents a Fully Qualified Domain Name.
+ AddressType AddressType `json:"addressType" protobuf:"bytes,4,rep,name=addressType"`
+
+ // endpoints is a list of unique endpoints in this slice. Each slice may
+ // include a maximum of 1000 endpoints.
+ // +listType=atomic
+ Endpoints []Endpoint `json:"endpoints" protobuf:"bytes,2,rep,name=endpoints"`
+
+ // ports specifies the list of network ports exposed by each endpoint in
+ // this slice. Each port must have a unique name. When ports is empty, it
+ // indicates that there are no defined ports. When a port is defined with a
+ // nil port value, it indicates "all ports". Each slice may include a
+ // maximum of 100 ports.
+ // +optional
+ // +listType=atomic
+ Ports []EndpointPort `json:"ports" protobuf:"bytes,3,rep,name=ports"`
+}
+
+// AddressType represents the type of address referred to by an endpoint.
+// +enum
+type AddressType string
+
+const (
+ // AddressTypeIPv4 represents an IPv4 Address.
+ AddressTypeIPv4 = AddressType(slim_corev1.IPv4Protocol)
+
+ // AddressTypeIPv6 represents an IPv6 Address.
+ AddressTypeIPv6 = AddressType(slim_corev1.IPv6Protocol)
+
+ // AddressTypeFQDN represents a FQDN.
+ AddressTypeFQDN = AddressType("FQDN")
+)
+
+// Endpoint represents a single logical "backend" implementing a service.
+type Endpoint struct {
+ // addresses of this endpoint. The contents of this field are interpreted
+ // according to the corresponding EndpointSlice addressType field. Consumers
+ // must handle different types of addresses in the context of their own
+ // capabilities. This must contain at least one address but no more than
+ // 100. These are all assumed to be fungible and clients may choose to only
+ // use the first element. Refer to: https://issue.k8s.io/106267
+ // +listType=set
+ Addresses []string `json:"addresses" protobuf:"bytes,1,rep,name=addresses"`
+
+ // conditions contains information about the current status of the endpoint.
+ Conditions EndpointConditions `json:"conditions,omitempty" protobuf:"bytes,2,opt,name=conditions"`
+
+ // deprecatedTopology contains topology information part of the v1beta1
+ // API. This field is deprecated, and will be removed when the v1beta1
+ // API is removed (no sooner than kubernetes v1.24). While this field can
+ // hold values, it is not writable through the v1 API, and any attempts to
+ // write to it will be silently ignored. Topology information can be found
+ // in the zone and nodeName fields instead.
+ // +optional
+ DeprecatedTopology map[string]string `json:"deprecatedTopology,omitempty" protobuf:"bytes,5,opt,name=deprecatedTopology"`
+
+ // nodeName represents the name of the Node hosting this endpoint. This can
+ // be used to determine endpoints local to a Node.
+ // +optional
+ NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,6,opt,name=nodeName"`
+
+ // zone is the name of the Zone this endpoint exists in.
+ // +optional
+ Zone *string `json:"zone,omitempty" protobuf:"bytes,7,opt,name=zone"`
+
+ // hints contains information associated with how an endpoint should be
+ // consumed.
+ // +optional
+ Hints *EndpointHints `json:"hints,omitempty" protobuf:"bytes,8,opt,name=hints"`
+}
+
+// EndpointConditions represents the current condition of an endpoint.
+type EndpointConditions struct {
+ // ready indicates that this endpoint is prepared to receive traffic,
+ // according to whatever system is managing the endpoint. A nil value
+ // indicates an unknown state. In most cases consumers should interpret this
+ // unknown state as ready. For compatibility reasons, ready should never be
+ // "true" for terminating endpoints, except when the normal readiness
+ // behavior is being explicitly overridden, for example when the associated
+ // Service has set the publishNotReadyAddresses flag.
+ // +optional
+ Ready *bool `json:"ready,omitempty" protobuf:"bytes,1,name=ready"`
+
+ // serving is identical to ready except that it is set regardless of the
+ // terminating state of endpoints. This condition should be set to true for
+ // a ready endpoint that is terminating. If nil, consumers should defer to
+ // the ready condition.
+ // +optional
+ Serving *bool `json:"serving,omitempty" protobuf:"bytes,2,name=serving"`
+
+ // terminating indicates that this endpoint is terminating. A nil value
+ // indicates an unknown state. Consumers should interpret this unknown state
+ // to mean that the endpoint is not terminating.
+ // +optional
+ Terminating *bool `json:"terminating,omitempty" protobuf:"bytes,3,name=terminating"`
+}
+
+// EndpointHints provides hints describing how an endpoint should be consumed.
+type EndpointHints struct {
+ // forZones indicates the zone(s) this endpoint should be consumed by to
+ // enable topology aware routing.
+ // +listType=atomic
+ ForZones []ForZone `json:"forZones,omitempty" protobuf:"bytes,1,name=forZones"`
+}
+
+// ForZone provides information about which zones should consume this endpoint.
+type ForZone struct {
+ // name represents the name of the zone.
+ Name string `json:"name" protobuf:"bytes,1,name=name"`
+}
+
+// EndpointPort represents a Port used by an EndpointSlice
+// +structType=atomic
+type EndpointPort struct {
+ // name represents the name of this port. All ports in an EndpointSlice must have a unique name.
+ // If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name.
+ // Name must either be an empty string or pass DNS_LABEL validation:
+ // * must be no more than 63 characters long.
+ // * must consist of lower case alphanumeric characters or '-'.
+ // * must start and end with an alphanumeric character.
+ // Default is empty string.
+ Name *string `json:"name,omitempty" protobuf:"bytes,1,name=name"`
+
+ // protocol represents the IP protocol for this port.
+ // Must be UDP, TCP, or SCTP.
+ // Default is TCP.
+ Protocol *slim_corev1.Protocol `json:"protocol,omitempty" protobuf:"bytes,2,name=protocol"`
+
+ // port represents the port number of the endpoint.
+ // If this is not specified, ports are not restricted and must be
+ // interpreted in the context of the specific consumer.
+ Port *int32 `json:"port,omitempty" protobuf:"bytes,3,opt,name=port"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// EndpointSliceList represents a list of endpoint slices
+type EndpointSliceList struct {
+ slim_metav1.TypeMeta `json:",inline"`
+
+ // Standard list metadata.
+ // +optional
+ slim_metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // items is the list of endpoint slices
+ Items []EndpointSlice `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/well_known_labels.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/well_known_labels.go
new file mode 100644
index 000000000..8193f423f
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/well_known_labels.go
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Copyright 2019 The Kubernetes Authors.
+
+package v1
+
+const (
+ // LabelServiceName is used to indicate the name of a Kubernetes service.
+ LabelServiceName = "kubernetes.io/service-name"
+)
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..84538aa06
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/zz_generated.deepcopy.go
@@ -0,0 +1,230 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Endpoint) DeepCopyInto(out *Endpoint) {
+ *out = *in
+ if in.Addresses != nil {
+ in, out := &in.Addresses, &out.Addresses
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ in.Conditions.DeepCopyInto(&out.Conditions)
+ if in.DeprecatedTopology != nil {
+ in, out := &in.DeprecatedTopology, &out.DeprecatedTopology
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.NodeName != nil {
+ in, out := &in.NodeName, &out.NodeName
+ *out = new(string)
+ **out = **in
+ }
+ if in.Zone != nil {
+ in, out := &in.Zone, &out.Zone
+ *out = new(string)
+ **out = **in
+ }
+ if in.Hints != nil {
+ in, out := &in.Hints, &out.Hints
+ *out = new(EndpointHints)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint.
+func (in *Endpoint) DeepCopy() *Endpoint {
+ if in == nil {
+ return nil
+ }
+ out := new(Endpoint)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EndpointConditions) DeepCopyInto(out *EndpointConditions) {
+ *out = *in
+ if in.Ready != nil {
+ in, out := &in.Ready, &out.Ready
+ *out = new(bool)
+ **out = **in
+ }
+ if in.Serving != nil {
+ in, out := &in.Serving, &out.Serving
+ *out = new(bool)
+ **out = **in
+ }
+ if in.Terminating != nil {
+ in, out := &in.Terminating, &out.Terminating
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointConditions.
+func (in *EndpointConditions) DeepCopy() *EndpointConditions {
+ if in == nil {
+ return nil
+ }
+ out := new(EndpointConditions)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EndpointHints) DeepCopyInto(out *EndpointHints) {
+ *out = *in
+ if in.ForZones != nil {
+ in, out := &in.ForZones, &out.ForZones
+ *out = make([]ForZone, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointHints.
+func (in *EndpointHints) DeepCopy() *EndpointHints {
+ if in == nil {
+ return nil
+ }
+ out := new(EndpointHints)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EndpointPort) DeepCopyInto(out *EndpointPort) {
+ *out = *in
+ if in.Name != nil {
+ in, out := &in.Name, &out.Name
+ *out = new(string)
+ **out = **in
+ }
+ if in.Protocol != nil {
+ in, out := &in.Protocol, &out.Protocol
+ *out = new(corev1.Protocol)
+ **out = **in
+ }
+ if in.Port != nil {
+ in, out := &in.Port, &out.Port
+ *out = new(int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPort.
+func (in *EndpointPort) DeepCopy() *EndpointPort {
+ if in == nil {
+ return nil
+ }
+ out := new(EndpointPort)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EndpointSlice) DeepCopyInto(out *EndpointSlice) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.Endpoints != nil {
+ in, out := &in.Endpoints, &out.Endpoints
+ *out = make([]Endpoint, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Ports != nil {
+ in, out := &in.Ports, &out.Ports
+ *out = make([]EndpointPort, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSlice.
+func (in *EndpointSlice) DeepCopy() *EndpointSlice {
+ if in == nil {
+ return nil
+ }
+ out := new(EndpointSlice)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EndpointSlice) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EndpointSliceList) DeepCopyInto(out *EndpointSliceList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]EndpointSlice, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSliceList.
+func (in *EndpointSliceList) DeepCopy() *EndpointSliceList {
+ if in == nil {
+ return nil
+ }
+ out := new(EndpointSliceList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EndpointSliceList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ForZone) DeepCopyInto(out *ForZone) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForZone.
+func (in *ForZone) DeepCopy() *ForZone {
+ if in == nil {
+ return nil
+ }
+ out := new(ForZone)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/zz_generated.deepequal.go
new file mode 100644
index 000000000..5a0338855
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1/zz_generated.deepequal.go
@@ -0,0 +1,284 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by deepequal-gen. DO NOT EDIT.
+
+package v1
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *Endpoint) DeepEqual(other *Endpoint) bool {
+ if other == nil {
+ return false
+ }
+
+ if ((in.Addresses != nil) && (other.Addresses != nil)) || ((in.Addresses == nil) != (other.Addresses == nil)) {
+ in, other := &in.Addresses, &other.Addresses
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if inElement != (*other)[i] {
+ return false
+ }
+ }
+ }
+ }
+
+ if !in.Conditions.DeepEqual(&other.Conditions) {
+ return false
+ }
+
+ if ((in.DeprecatedTopology != nil) && (other.DeprecatedTopology != nil)) || ((in.DeprecatedTopology == nil) != (other.DeprecatedTopology == nil)) {
+ in, other := &in.DeprecatedTopology, &other.DeprecatedTopology
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for key, inValue := range *in {
+ if otherValue, present := (*other)[key]; !present {
+ return false
+ } else {
+ if inValue != otherValue {
+ return false
+ }
+ }
+ }
+ }
+ }
+
+ if (in.NodeName == nil) != (other.NodeName == nil) {
+ return false
+ } else if in.NodeName != nil {
+ if *in.NodeName != *other.NodeName {
+ return false
+ }
+ }
+
+ if (in.Zone == nil) != (other.Zone == nil) {
+ return false
+ } else if in.Zone != nil {
+ if *in.Zone != *other.Zone {
+ return false
+ }
+ }
+
+ if (in.Hints == nil) != (other.Hints == nil) {
+ return false
+ } else if in.Hints != nil {
+ if !in.Hints.DeepEqual(other.Hints) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *EndpointConditions) DeepEqual(other *EndpointConditions) bool {
+ if other == nil {
+ return false
+ }
+
+ if (in.Ready == nil) != (other.Ready == nil) {
+ return false
+ } else if in.Ready != nil {
+ if *in.Ready != *other.Ready {
+ return false
+ }
+ }
+
+ if (in.Serving == nil) != (other.Serving == nil) {
+ return false
+ } else if in.Serving != nil {
+ if *in.Serving != *other.Serving {
+ return false
+ }
+ }
+
+ if (in.Terminating == nil) != (other.Terminating == nil) {
+ return false
+ } else if in.Terminating != nil {
+ if *in.Terminating != *other.Terminating {
+ return false
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *EndpointHints) DeepEqual(other *EndpointHints) bool {
+ if other == nil {
+ return false
+ }
+
+ if ((in.ForZones != nil) && (other.ForZones != nil)) || ((in.ForZones == nil) != (other.ForZones == nil)) {
+ in, other := &in.ForZones, &other.ForZones
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *EndpointPort) DeepEqual(other *EndpointPort) bool {
+ if other == nil {
+ return false
+ }
+
+ if (in.Name == nil) != (other.Name == nil) {
+ return false
+ } else if in.Name != nil {
+ if *in.Name != *other.Name {
+ return false
+ }
+ }
+
+ if (in.Protocol == nil) != (other.Protocol == nil) {
+ return false
+ } else if in.Protocol != nil {
+ if *in.Protocol != *other.Protocol {
+ return false
+ }
+ }
+
+ if (in.Port == nil) != (other.Port == nil) {
+ return false
+ } else if in.Port != nil {
+ if *in.Port != *other.Port {
+ return false
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *EndpointSlice) DeepEqual(other *EndpointSlice) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.TypeMeta != other.TypeMeta {
+ return false
+ }
+
+ if !in.ObjectMeta.DeepEqual(&other.ObjectMeta) {
+ return false
+ }
+
+ if in.AddressType != other.AddressType {
+ return false
+ }
+ if ((in.Endpoints != nil) && (other.Endpoints != nil)) || ((in.Endpoints == nil) != (other.Endpoints == nil)) {
+ in, other := &in.Endpoints, &other.Endpoints
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ if ((in.Ports != nil) && (other.Ports != nil)) || ((in.Ports == nil) != (other.Ports == nil)) {
+ in, other := &in.Ports, &other.Ports
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *EndpointSliceList) DeepEqual(other *EndpointSliceList) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.TypeMeta != other.TypeMeta {
+ return false
+ }
+
+ if !in.ListMeta.DeepEqual(&other.ListMeta) {
+ return false
+ }
+
+ if ((in.Items != nil) && (other.Items != nil)) || ((in.Items == nil) != (other.Items == nil)) {
+ in, other := &in.Items, &other.Items
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *ForZone) DeepEqual(other *ForZone) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Name != other.Name {
+ return false
+ }
+
+ return true
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/doc.go
new file mode 100644
index 000000000..67e6a8769
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/doc.go
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// +k8s:deepcopy-gen=package
+// +k8s:protobuf-gen=package
+// +k8s:openapi-gen=true
+// +k8s:prerelease-lifecycle-gen=true
+// +deepequal-gen=package
+// +groupName=discovery.k8s.io
+
+// Package v1beta1 contains slimmer versions of k8s discovery types.
+package v1beta1
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/generated.pb.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/generated.pb.go
new file mode 100644
index 000000000..3280b5e8f
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/generated.pb.go
@@ -0,0 +1,1992 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/generated.proto
+
+package v1beta1
+
+import (
+ fmt "fmt"
+
+ github_com_cilium_cilium_pkg_k8s_slim_k8s_api_core_v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
+
+ io "io"
+
+ proto "github.com/gogo/protobuf/proto"
+ github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+
+ math "math"
+ math_bits "math/bits"
+ reflect "reflect"
+ strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *Endpoint) Reset() { *m = Endpoint{} }
+func (*Endpoint) ProtoMessage() {}
+func (*Endpoint) Descriptor() ([]byte, []int) {
+ return fileDescriptor_db2884f8c881be40, []int{0}
+}
+func (m *Endpoint) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Endpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Endpoint) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Endpoint.Merge(m, src)
+}
+func (m *Endpoint) XXX_Size() int {
+ return m.Size()
+}
+func (m *Endpoint) XXX_DiscardUnknown() {
+ xxx_messageInfo_Endpoint.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Endpoint proto.InternalMessageInfo
+
+func (m *EndpointConditions) Reset() { *m = EndpointConditions{} }
+func (*EndpointConditions) ProtoMessage() {}
+func (*EndpointConditions) Descriptor() ([]byte, []int) {
+ return fileDescriptor_db2884f8c881be40, []int{1}
+}
+func (m *EndpointConditions) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *EndpointConditions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *EndpointConditions) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EndpointConditions.Merge(m, src)
+}
+func (m *EndpointConditions) XXX_Size() int {
+ return m.Size()
+}
+func (m *EndpointConditions) XXX_DiscardUnknown() {
+ xxx_messageInfo_EndpointConditions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EndpointConditions proto.InternalMessageInfo
+
+func (m *EndpointHints) Reset() { *m = EndpointHints{} }
+func (*EndpointHints) ProtoMessage() {}
+func (*EndpointHints) Descriptor() ([]byte, []int) {
+ return fileDescriptor_db2884f8c881be40, []int{2}
+}
+func (m *EndpointHints) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *EndpointHints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *EndpointHints) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EndpointHints.Merge(m, src)
+}
+func (m *EndpointHints) XXX_Size() int {
+ return m.Size()
+}
+func (m *EndpointHints) XXX_DiscardUnknown() {
+ xxx_messageInfo_EndpointHints.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EndpointHints proto.InternalMessageInfo
+
+func (m *EndpointPort) Reset() { *m = EndpointPort{} }
+func (*EndpointPort) ProtoMessage() {}
+func (*EndpointPort) Descriptor() ([]byte, []int) {
+ return fileDescriptor_db2884f8c881be40, []int{3}
+}
+func (m *EndpointPort) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *EndpointPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *EndpointPort) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EndpointPort.Merge(m, src)
+}
+func (m *EndpointPort) XXX_Size() int {
+ return m.Size()
+}
+func (m *EndpointPort) XXX_DiscardUnknown() {
+ xxx_messageInfo_EndpointPort.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EndpointPort proto.InternalMessageInfo
+
+func (m *EndpointSlice) Reset() { *m = EndpointSlice{} }
+func (*EndpointSlice) ProtoMessage() {}
+func (*EndpointSlice) Descriptor() ([]byte, []int) {
+ return fileDescriptor_db2884f8c881be40, []int{4}
+}
+func (m *EndpointSlice) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *EndpointSlice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *EndpointSlice) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EndpointSlice.Merge(m, src)
+}
+func (m *EndpointSlice) XXX_Size() int {
+ return m.Size()
+}
+func (m *EndpointSlice) XXX_DiscardUnknown() {
+ xxx_messageInfo_EndpointSlice.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EndpointSlice proto.InternalMessageInfo
+
+func (m *EndpointSliceList) Reset() { *m = EndpointSliceList{} }
+func (*EndpointSliceList) ProtoMessage() {}
+func (*EndpointSliceList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_db2884f8c881be40, []int{5}
+}
+func (m *EndpointSliceList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *EndpointSliceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *EndpointSliceList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EndpointSliceList.Merge(m, src)
+}
+func (m *EndpointSliceList) XXX_Size() int {
+ return m.Size()
+}
+func (m *EndpointSliceList) XXX_DiscardUnknown() {
+ xxx_messageInfo_EndpointSliceList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EndpointSliceList proto.InternalMessageInfo
+
+func (m *ForZone) Reset() { *m = ForZone{} }
+func (*ForZone) ProtoMessage() {}
+func (*ForZone) Descriptor() ([]byte, []int) {
+ return fileDescriptor_db2884f8c881be40, []int{6}
+}
+func (m *ForZone) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ForZone) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ForZone) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ForZone.Merge(m, src)
+}
+func (m *ForZone) XXX_Size() int {
+ return m.Size()
+}
+func (m *ForZone) XXX_DiscardUnknown() {
+ xxx_messageInfo_ForZone.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ForZone proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*Endpoint)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1beta1.Endpoint")
+ proto.RegisterMapType((map[string]string)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1beta1.Endpoint.TopologyEntry")
+ proto.RegisterType((*EndpointConditions)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1beta1.EndpointConditions")
+ proto.RegisterType((*EndpointHints)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1beta1.EndpointHints")
+ proto.RegisterType((*EndpointPort)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1beta1.EndpointPort")
+ proto.RegisterType((*EndpointSlice)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1beta1.EndpointSlice")
+ proto.RegisterType((*EndpointSliceList)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1beta1.EndpointSliceList")
+ proto.RegisterType((*ForZone)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1beta1.ForZone")
+}
+
+func init() {
+ proto.RegisterFile("github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/generated.proto", fileDescriptor_db2884f8c881be40)
+}
+
+var fileDescriptor_db2884f8c881be40 = []byte{
+ // 806 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x4f, 0x8b, 0xdb, 0x46,
+ 0x1c, 0xf5, 0xc4, 0xab, 0xae, 0x34, 0xda, 0xa5, 0xc9, 0xd0, 0x83, 0x59, 0x8a, 0x64, 0x04, 0x05,
+ 0x43, 0xa8, 0x84, 0x7d, 0x28, 0x4b, 0x0b, 0x4d, 0xa3, 0x76, 0x43, 0x02, 0x8d, 0x13, 0x66, 0x17,
+ 0x0a, 0xb9, 0xc9, 0xd2, 0x44, 0x3b, 0xb1, 0xa5, 0x11, 0x9a, 0xb1, 0xc1, 0x3d, 0xf5, 0x52, 0x72,
+ 0x6c, 0x3f, 0x4a, 0x4f, 0x85, 0x7e, 0x83, 0x3d, 0xe6, 0x98, 0x93, 0xe8, 0xaa, 0x5f, 0xa2, 0xe4,
+ 0x54, 0x66, 0xf4, 0xcf, 0xae, 0x0b, 0x65, 0x77, 0x7d, 0xf2, 0xcc, 0x9b, 0x99, 0xf7, 0xde, 0xfc,
+ 0x9e, 0x7e, 0x83, 0xe1, 0x8b, 0x98, 0x8a, 0xcb, 0xe5, 0xcc, 0x0d, 0x59, 0xe2, 0x85, 0x74, 0x41,
+ 0x97, 0xed, 0x4f, 0x36, 0x8f, 0xbd, 0xf9, 0x29, 0xf7, 0xf8, 0x82, 0x26, 0x6a, 0x10, 0x64, 0xd4,
+ 0x8b, 0x28, 0x0f, 0xd9, 0x8a, 0xe4, 0x6b, 0x6f, 0x35, 0x9e, 0x11, 0x11, 0x8c, 0xbd, 0x98, 0xa4,
+ 0x24, 0x0f, 0x04, 0x89, 0xdc, 0x2c, 0x67, 0x82, 0xa1, 0x47, 0x1d, 0xa1, 0x5b, 0x31, 0x35, 0x3f,
+ 0xd9, 0x3c, 0x76, 0xe7, 0xa7, 0xdc, 0x95, 0x84, 0x6a, 0x10, 0x64, 0xd4, 0x6d, 0x09, 0xdd, 0x9a,
+ 0xf0, 0xe4, 0xec, 0x66, 0x8e, 0x42, 0x96, 0x13, 0x6f, 0xb5, 0xe3, 0xe3, 0xe4, 0xc9, 0x8d, 0x68,
+ 0xb8, 0x97, 0x10, 0x11, 0xfc, 0x17, 0xcf, 0xe7, 0x1b, 0x3c, 0x31, 0x8b, 0x99, 0xa7, 0xe0, 0xd9,
+ 0xf2, 0xb5, 0x9a, 0xa9, 0x89, 0x1a, 0xd5, 0xdb, 0x25, 0xa1, 0x4b, 0x99, 0xe4, 0x4c, 0x82, 0xf0,
+ 0x92, 0xa6, 0xb2, 0x56, 0x52, 0x31, 0x5f, 0xa6, 0x82, 0x26, 0x64, 0x87, 0xff, 0x8b, 0xff, 0x3b,
+ 0xc0, 0xc3, 0x4b, 0x92, 0x04, 0xff, 0x3e, 0xe7, 0xfc, 0xd6, 0x87, 0xfa, 0x59, 0x1a, 0x65, 0x8c,
+ 0xa6, 0x02, 0x3d, 0x84, 0x46, 0x10, 0x45, 0x39, 0xe1, 0x9c, 0xf0, 0x01, 0x18, 0xf6, 0x47, 0x86,
+ 0x7f, 0x5c, 0x16, 0xb6, 0xf1, 0xb8, 0x01, 0x71, 0xb7, 0x8e, 0xde, 0x02, 0x08, 0x43, 0x96, 0x46,
+ 0x54, 0x50, 0x96, 0xf2, 0xc1, 0xbd, 0x21, 0x18, 0x99, 0x93, 0x73, 0xf7, 0x8e, 0xb9, 0xb9, 0x8d,
+ 0x99, 0x6f, 0x5b, 0x6a, 0x1f, 0x5d, 0x15, 0x76, 0xaf, 0x2c, 0x6c, 0xd8, 0x61, 0x78, 0x43, 0x1a,
+ 0xfd, 0x0c, 0xa0, 0x2e, 0x58, 0xc6, 0x16, 0x2c, 0x5e, 0x0f, 0xb4, 0x61, 0x7f, 0x64, 0x4e, 0x7e,
+ 0xd8, 0x9b, 0x0f, 0xf7, 0xa2, 0x66, 0x3e, 0x4b, 0x45, 0xbe, 0xf6, 0xef, 0xd7, 0x5e, 0xf4, 0x06,
+ 0xc6, 0xad, 0x34, 0x1a, 0x41, 0x3d, 0x65, 0x11, 0x99, 0x06, 0x09, 0x19, 0x7c, 0x34, 0x04, 0x23,
+ 0xc3, 0x3f, 0x92, 0x3b, 0xa7, 0x35, 0x86, 0xdb, 0xd5, 0x93, 0xaf, 0xe0, 0xf1, 0x16, 0x2d, 0xba,
+ 0x0f, 0xfb, 0x73, 0xb2, 0x1e, 0x00, 0x79, 0x0a, 0xcb, 0x21, 0xfa, 0x04, 0x6a, 0xab, 0x60, 0xb1,
+ 0x24, 0xaa, 0xb0, 0x06, 0xae, 0x26, 0x5f, 0xde, 0x3b, 0x05, 0xce, 0x2f, 0x00, 0xa2, 0xdd, 0x2a,
+ 0x21, 0x1b, 0x6a, 0x39, 0x09, 0xa2, 0x8a, 0x44, 0xf7, 0x8d, 0xb2, 0xb0, 0x35, 0x2c, 0x01, 0x5c,
+ 0xe1, 0xe8, 0x33, 0x78, 0xc8, 0x49, 0xbe, 0xa2, 0x69, 0xac, 0x38, 0x75, 0xdf, 0x2c, 0x0b, 0xfb,
+ 0xf0, 0xbc, 0x82, 0x70, 0xb3, 0x86, 0xc6, 0xd0, 0x14, 0x24, 0x4f, 0x68, 0x1a, 0x08, 0xb9, 0xb5,
+ 0xaf, 0xb6, 0x7e, 0x5c, 0x16, 0xb6, 0x79, 0xd1, 0xc1, 0x78, 0x73, 0x8f, 0xf3, 0x16, 0xc0, 0xe3,
+ 0xc6, 0xd1, 0x53, 0x9a, 0x0a, 0x8e, 0x56, 0x50, 0x7f, 0xcd, 0xf2, 0x57, 0x2c, 0xad, 0x3f, 0x24,
+ 0x73, 0xf2, 0xf4, 0xce, 0x89, 0x3c, 0xa9, 0x08, 0xbb, 0x08, 0x6a, 0x80, 0xe3, 0x56, 0xcb, 0xf9,
+ 0x1d, 0xc0, 0xa3, 0xc6, 0xc9, 0x4b, 0x96, 0x0b, 0xf4, 0x29, 0x3c, 0x48, 0x65, 0x1e, 0xaa, 0xb2,
+ 0xbe, 0x5e, 0x16, 0xf6, 0x81, 0xca, 0x42, 0xa1, 0xe8, 0x0d, 0xd4, 0x55, 0x1b, 0x84, 0x6c, 0x51,
+ 0xd5, 0xd9, 0x9f, 0x4a, 0xe2, 0x97, 0x35, 0xf6, 0xa1, 0xb0, 0xbf, 0xbe, 0xd5, 0x33, 0xe2, 0x36,
+ 0x0c, 0xb8, 0xe5, 0x97, 0x4e, 0x32, 0x96, 0x0b, 0x55, 0x50, 0xad, 0x72, 0x22, 0x1d, 0x62, 0x85,
+ 0x3a, 0x7f, 0xf4, 0xbb, 0x12, 0x9e, 0x2f, 0x68, 0x48, 0x50, 0x0e, 0x75, 0xf9, 0x98, 0x44, 0x81,
+ 0x08, 0x94, 0x7b, 0x73, 0xe2, 0xdf, 0xac, 0x84, 0xdc, 0x95, 0xe7, 0xdd, 0xd5, 0xd8, 0x7d, 0x31,
+ 0x7b, 0x43, 0x42, 0xf1, 0x9c, 0x88, 0xa0, 0xeb, 0xa5, 0x0e, 0xc3, 0xad, 0x0e, 0xfa, 0x0e, 0x9a,
+ 0x75, 0x83, 0x5f, 0xac, 0x33, 0x32, 0x38, 0x50, 0x25, 0x71, 0xea, 0x23, 0xe6, 0xe3, 0x6e, 0xe9,
+ 0xc3, 0xf6, 0x14, 0x6f, 0x1e, 0x43, 0x3f, 0x42, 0x83, 0xd4, 0x57, 0x91, 0xef, 0x82, 0x4c, 0xff,
+ 0xd9, 0xde, 0xfa, 0xd1, 0x7f, 0x50, 0xdb, 0x31, 0x1a, 0x84, 0xe3, 0x4e, 0x0e, 0xe5, 0x50, 0x93,
+ 0xf5, 0xe4, 0x83, 0xbe, 0xd2, 0x7d, 0xbe, 0x37, 0x5d, 0x99, 0x95, 0x7f, 0x5c, 0x6b, 0x6b, 0x72,
+ 0xc6, 0x71, 0x25, 0xe5, 0xfc, 0x0d, 0xe0, 0x83, 0xad, 0xec, 0xbe, 0xa7, 0x5c, 0xa0, 0x74, 0x27,
+ 0xbf, 0x6f, 0x6e, 0x9b, 0x9f, 0xe4, 0x53, 0xe9, 0xb5, 0x9f, 0x7e, 0x83, 0x6c, 0x64, 0xc7, 0xa1,
+ 0x46, 0x05, 0x49, 0x9a, 0x8a, 0x4f, 0xf7, 0x76, 0x73, 0x75, 0xa5, 0xee, 0xea, 0xcf, 0xa4, 0x08,
+ 0xae, 0xb4, 0x9c, 0x87, 0xf0, 0xb0, 0xee, 0x42, 0x34, 0xdc, 0xea, 0xb4, 0xa3, 0x7a, 0xfb, 0x46,
+ 0xb7, 0xf9, 0xe4, 0xea, 0xda, 0xea, 0xbd, 0xbb, 0xb6, 0x7a, 0xef, 0xaf, 0xad, 0xde, 0x4f, 0xa5,
+ 0x05, 0xae, 0x4a, 0x0b, 0xbc, 0x2b, 0x2d, 0xf0, 0xbe, 0xb4, 0xc0, 0x9f, 0xa5, 0x05, 0x7e, 0xfd,
+ 0xcb, 0xea, 0xbd, 0x7a, 0x74, 0xc7, 0xbf, 0x12, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0xa3, 0x0c,
+ 0x62, 0x8d, 0x8c, 0x08, 0x00, 0x00,
+}
+
+func (m *Endpoint) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Endpoint) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Endpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.NodeName != nil {
+ i -= len(*m.NodeName)
+ copy(dAtA[i:], *m.NodeName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName)))
+ i--
+ dAtA[i] = 0x32
+ }
+ if len(m.Topology) > 0 {
+ keysForTopology := make([]string, 0, len(m.Topology))
+ for k := range m.Topology {
+ keysForTopology = append(keysForTopology, string(k))
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForTopology)
+ for iNdEx := len(keysForTopology) - 1; iNdEx >= 0; iNdEx-- {
+ v := m.Topology[string(keysForTopology[iNdEx])]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(keysForTopology[iNdEx])
+ copy(dAtA[i:], keysForTopology[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(keysForTopology[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x2a
+ }
+ }
+ {
+ size, err := m.Conditions.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ if len(m.Addresses) > 0 {
+ for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Addresses[iNdEx])
+ copy(dAtA[i:], m.Addresses[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Addresses[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *EndpointConditions) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EndpointConditions) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EndpointConditions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Terminating != nil {
+ i--
+ if *m.Terminating {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.Serving != nil {
+ i--
+ if *m.Serving {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Ready != nil {
+ i--
+ if *m.Ready {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *EndpointHints) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EndpointHints) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EndpointHints) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.ForZones) > 0 {
+ for iNdEx := len(m.ForZones) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.ForZones[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *EndpointPort) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EndpointPort) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EndpointPort) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Port != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.Port))
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.Protocol != nil {
+ i -= len(*m.Protocol)
+ copy(dAtA[i:], *m.Protocol)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Name != nil {
+ i -= len(*m.Name)
+ copy(dAtA[i:], *m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *EndpointSlice) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EndpointSlice) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EndpointSlice) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.AddressType)
+ copy(dAtA[i:], m.AddressType)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.AddressType)))
+ i--
+ dAtA[i] = 0x22
+ if len(m.Ports) > 0 {
+ for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.Endpoints) > 0 {
+ for iNdEx := len(m.Endpoints) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Endpoints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *EndpointSliceList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EndpointSliceList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EndpointSliceList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ForZone) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ForZone) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ForZone) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *Endpoint) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Addresses) > 0 {
+ for _, s := range m.Addresses {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = m.Conditions.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Topology) > 0 {
+ for k, v := range m.Topology {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+ n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+ }
+ }
+ if m.NodeName != nil {
+ l = len(*m.NodeName)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *EndpointConditions) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Ready != nil {
+ n += 2
+ }
+ if m.Serving != nil {
+ n += 2
+ }
+ if m.Terminating != nil {
+ n += 2
+ }
+ return n
+}
+
+func (m *EndpointHints) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.ForZones) > 0 {
+ for _, e := range m.ForZones {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *EndpointPort) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Name != nil {
+ l = len(*m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Protocol != nil {
+ l = len(*m.Protocol)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Port != nil {
+ n += 1 + sovGenerated(uint64(*m.Port))
+ }
+ return n
+}
+
+func (m *EndpointSlice) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Endpoints) > 0 {
+ for _, e := range m.Endpoints {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Ports) > 0 {
+ for _, e := range m.Ports {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.AddressType)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *EndpointSliceList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ForZone) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *Endpoint) String() string {
+ if this == nil {
+ return "nil"
+ }
+ keysForTopology := make([]string, 0, len(this.Topology))
+ for k := range this.Topology {
+ keysForTopology = append(keysForTopology, k)
+ }
+ github_com_gogo_protobuf_sortkeys.Strings(keysForTopology)
+ mapStringForTopology := "map[string]string{"
+ for _, k := range keysForTopology {
+ mapStringForTopology += fmt.Sprintf("%v: %v,", k, this.Topology[k])
+ }
+ mapStringForTopology += "}"
+ s := strings.Join([]string{`&Endpoint{`,
+ `Addresses:` + fmt.Sprintf("%v", this.Addresses) + `,`,
+ `Conditions:` + strings.Replace(strings.Replace(this.Conditions.String(), "EndpointConditions", "EndpointConditions", 1), `&`, ``, 1) + `,`,
+ `Topology:` + mapStringForTopology + `,`,
+ `NodeName:` + valueToStringGenerated(this.NodeName) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *EndpointConditions) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&EndpointConditions{`,
+ `Ready:` + valueToStringGenerated(this.Ready) + `,`,
+ `Serving:` + valueToStringGenerated(this.Serving) + `,`,
+ `Terminating:` + valueToStringGenerated(this.Terminating) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *EndpointHints) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForForZones := "[]ForZone{"
+ for _, f := range this.ForZones {
+ repeatedStringForForZones += strings.Replace(strings.Replace(f.String(), "ForZone", "ForZone", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForForZones += "}"
+ s := strings.Join([]string{`&EndpointHints{`,
+ `ForZones:` + repeatedStringForForZones + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *EndpointPort) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&EndpointPort{`,
+ `Name:` + valueToStringGenerated(this.Name) + `,`,
+ `Protocol:` + valueToStringGenerated(this.Protocol) + `,`,
+ `Port:` + valueToStringGenerated(this.Port) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *EndpointSlice) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForEndpoints := "[]Endpoint{"
+ for _, f := range this.Endpoints {
+ repeatedStringForEndpoints += strings.Replace(strings.Replace(f.String(), "Endpoint", "Endpoint", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForEndpoints += "}"
+ repeatedStringForPorts := "[]EndpointPort{"
+ for _, f := range this.Ports {
+ repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "EndpointPort", "EndpointPort", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForPorts += "}"
+ s := strings.Join([]string{`&EndpointSlice{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Endpoints:` + repeatedStringForEndpoints + `,`,
+ `Ports:` + repeatedStringForPorts + `,`,
+ `AddressType:` + fmt.Sprintf("%v", this.AddressType) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *EndpointSliceList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]EndpointSlice{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "EndpointSlice", "EndpointSlice", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&EndpointSliceList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ForZone) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ForZone{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringGenerated(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *Endpoint) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Endpoint: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Endpoint: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Addresses = append(m.Addresses, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Conditions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Topology", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Topology == nil {
+ m.Topology = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Topology[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.NodeName = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EndpointConditions) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EndpointConditions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EndpointConditions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ready", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.Ready = &b
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Serving", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.Serving = &b
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Terminating", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.Terminating = &b
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EndpointHints) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EndpointHints: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EndpointHints: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ForZones", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ForZones = append(m.ForZones, ForZone{})
+ if err := m.ForZones[len(m.ForZones)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EndpointPort) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Name = &s
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := github_com_cilium_cilium_pkg_k8s_slim_k8s_api_core_v1.Protocol(dAtA[iNdEx:postIndex])
+ m.Protocol = &s
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Port = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EndpointSlice) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EndpointSlice: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EndpointSlice: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Endpoints", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Endpoints = append(m.Endpoints, Endpoint{})
+ if err := m.Endpoints[len(m.Endpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Ports = append(m.Ports, EndpointPort{})
+ if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AddressType", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.AddressType = AddressType(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EndpointSliceList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EndpointSliceList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EndpointSliceList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, EndpointSlice{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ForZone) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ForZone: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ForZone: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenerated
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/generated.proto b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/generated.proto
new file mode 100644
index 000000000..e9be56a84
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/generated.proto
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = "proto2";
+
+package github.com.cilium.cilium.pkg.k8s.slim.k8s.api.discovery.v1beta1;
+
+import "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.proto";
+import "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1";
+
+// Endpoint represents a single logical "backend" implementing a service.
+message Endpoint {
+ // addresses of this endpoint. The contents of this field are interpreted
+ // according to the corresponding EndpointSlice addressType field. Consumers
+ // must handle different types of addresses in the context of their own
+ // capabilities. This must contain at least one address but no more than
+ // 100. These are all assumed to be fungible and clients may choose to only
+ // use the first element. Refer to: https://issue.k8s.io/106267
+ // +listType=set
+ repeated string addresses = 1;
+
+ // conditions contains information about the current status of the endpoint.
+ optional EndpointConditions conditions = 2;
+
+ // topology contains arbitrary topology information associated with the
+ // endpoint. These key/value pairs must conform with the label format.
+ // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels
+ // Topology may include a maximum of 16 key/value pairs. This includes, but
+ // is not limited to the following well known keys:
+ // * kubernetes.io/hostname: the value indicates the hostname of the node
+ // where the endpoint is located. This should match the corresponding
+ // node label.
+ // * topology.kubernetes.io/zone: the value indicates the zone where the
+ // endpoint is located. This should match the corresponding node label.
+ // * topology.kubernetes.io/region: the value indicates the region where the
+ // endpoint is located. This should match the corresponding node label.
+ // This field is deprecated and will be removed in future api versions.
+ // +optional
+ map topology = 5;
+
+ // nodeName represents the name of the Node hosting this endpoint. This can
+ // be used to determine endpoints local to a Node.
+ // +optional
+ optional string nodeName = 6;
+}
+
+// EndpointConditions represents the current condition of an endpoint.
+message EndpointConditions {
+ // ready indicates that this endpoint is prepared to receive traffic,
+ // according to whatever system is managing the endpoint. A nil value
+ // indicates an unknown state. In most cases consumers should interpret this
+ // unknown state as ready. For compatibility reasons, ready should never be
+ // "true" for terminating endpoints.
+ // +optional
+ optional bool ready = 1;
+
+ // serving is identical to ready except that it is set regardless of the
+ // terminating state of endpoints. This condition should be set to true for
+ // a ready endpoint that is terminating. If nil, consumers should defer to
+ // the ready condition.
+ // +optional
+ optional bool serving = 2;
+
+ // terminating indicates that this endpoint is terminating. A nil value
+ // indicates an unknown state. Consumers should interpret this unknown state
+ // to mean that the endpoint is not terminating.
+ // +optional
+ optional bool terminating = 3;
+}
+
+// EndpointHints provides hints describing how an endpoint should be consumed.
+message EndpointHints {
+ // forZones indicates the zone(s) this endpoint should be consumed by to
+ // enable topology aware routing. May contain a maximum of 8 entries.
+ // +listType=atomic
+ repeated ForZone forZones = 1;
+}
+
+// EndpointPort represents a Port used by an EndpointSlice
+message EndpointPort {
+ // name represents the name of this port. All ports in an EndpointSlice must have a unique name.
+ // If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name.
+ // Name must either be an empty string or pass DNS_LABEL validation:
+ // * must be no more than 63 characters long.
+ // * must consist of lower case alphanumeric characters or '-'.
+ // * must start and end with an alphanumeric character.
+ // Default is empty string.
+ optional string name = 1;
+
+ // protocol represents the IP protocol for this port.
+ // Must be UDP, TCP, or SCTP.
+ // Default is TCP.
+ optional string protocol = 2;
+
+ // port represents the port number of the endpoint.
+ // If this is not specified, ports are not restricted and must be
+ // interpreted in the context of the specific consumer.
+ optional int32 port = 3;
+}
+
+// EndpointSlice represents a subset of the endpoints that implement a service.
+// For a given service there may be multiple EndpointSlice objects, selected by
+// labels, which must be joined to produce the full set of endpoints.
+message EndpointSlice {
+ // Standard object's metadata.
+ // +optional
+ optional github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // addressType specifies the type of address carried by this EndpointSlice.
+ // All addresses in this slice must be the same type. This field is
+ // immutable after creation. The following address types are currently
+ // supported:
+ // * IPv4: Represents an IPv4 Address.
+ // * IPv6: Represents an IPv6 Address.
+ // * FQDN: Represents a Fully Qualified Domain Name.
+ optional string addressType = 4;
+
+ // endpoints is a list of unique endpoints in this slice. Each slice may
+ // include a maximum of 1000 endpoints.
+ // +listType=atomic
+ repeated Endpoint endpoints = 2;
+
+ // ports specifies the list of network ports exposed by each endpoint in
+ // this slice. Each port must have a unique name. When ports is empty, it
+ // indicates that there are no defined ports. When a port is defined with a
+ // nil port value, it indicates "all ports". Each slice may include a
+ // maximum of 100 ports.
+ // +optional
+ // +listType=atomic
+ repeated EndpointPort ports = 3;
+}
+
+// EndpointSliceList represents a list of endpoint slices
+message EndpointSliceList {
+ // Standard list metadata.
+ // +optional
+ optional github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ListMeta metadata = 1;
+
+ // items is the list of endpoint slices
+ repeated EndpointSlice items = 2;
+}
+
+// ForZone provides information about which zones should consume this endpoint.
+message ForZone {
+ // name represents the name of the zone.
+ optional string name = 1;
+}
+
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/register.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/register.go
new file mode 100644
index 000000000..9040aba7b
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/register.go
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Copyright 2019 The Kubernetes Authors.
+
+package v1beta1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "discovery.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+// Kind takes an unqualified kind and returns a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ // SchemeBuilder is the scheme builder with scheme init functions to run for this API package
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme
+ AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &EndpointSlice{},
+ &EndpointSliceList{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/types.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/types.go
new file mode 100644
index 000000000..3e35fd2d2
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/types.go
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Copyright 2019 The Kubernetes Authors.
+
+package v1beta1
+
+import (
+ slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
+ slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.16
+// +k8s:prerelease-lifecycle-gen:deprecated=1.21
+// +k8s:prerelease-lifecycle-gen:removed=1.25
+// +k8s:prerelease-lifecycle-gen:replacement=discovery.k8s.io,v1,EndpointSlice
+
+// EndpointSlice represents a subset of the endpoints that implement a service.
+// For a given service there may be multiple EndpointSlice objects, selected by
+// labels, which must be joined to produce the full set of endpoints.
+type EndpointSlice struct {
+ slim_metav1.TypeMeta `json:",inline"`
+
+ // Standard object's metadata.
+ // +optional
+ slim_metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // addressType specifies the type of address carried by this EndpointSlice.
+ // All addresses in this slice must be the same type. This field is
+ // immutable after creation. The following address types are currently
+ // supported:
+ // * IPv4: Represents an IPv4 Address.
+ // * IPv6: Represents an IPv6 Address.
+ // * FQDN: Represents a Fully Qualified Domain Name.
+ AddressType AddressType `json:"addressType" protobuf:"bytes,4,rep,name=addressType"`
+
+ // endpoints is a list of unique endpoints in this slice. Each slice may
+ // include a maximum of 1000 endpoints.
+ // +listType=atomic
+ Endpoints []Endpoint `json:"endpoints" protobuf:"bytes,2,rep,name=endpoints"`
+
+ // ports specifies the list of network ports exposed by each endpoint in
+ // this slice. Each port must have a unique name. When ports is empty, it
+ // indicates that there are no defined ports. When a port is defined with a
+ // nil port value, it indicates "all ports". Each slice may include a
+ // maximum of 100 ports.
+ // +optional
+ // +listType=atomic
+ Ports []EndpointPort `json:"ports" protobuf:"bytes,3,rep,name=ports"`
+}
+
+// AddressType represents the type of address referred to by an endpoint.
+type AddressType string
+
+const (
+ // AddressTypeIPv4 represents an IPv4 Address.
+ AddressTypeIPv4 = AddressType(slim_corev1.IPv4Protocol)
+
+ // AddressTypeIPv6 represents an IPv6 Address.
+ AddressTypeIPv6 = AddressType(slim_corev1.IPv6Protocol)
+
+ // AddressTypeFQDN represents a FQDN.
+ AddressTypeFQDN = AddressType("FQDN")
+)
+
+// Endpoint represents a single logical "backend" implementing a service.
+type Endpoint struct {
+ // addresses of this endpoint. The contents of this field are interpreted
+ // according to the corresponding EndpointSlice addressType field. Consumers
+ // must handle different types of addresses in the context of their own
+ // capabilities. This must contain at least one address but no more than
+ // 100. These are all assumed to be fungible and clients may choose to only
+ // use the first element. Refer to: https://issue.k8s.io/106267
+ // +listType=set
+ Addresses []string `json:"addresses" protobuf:"bytes,1,rep,name=addresses"`
+
+ // conditions contains information about the current status of the endpoint.
+ Conditions EndpointConditions `json:"conditions,omitempty" protobuf:"bytes,2,opt,name=conditions"`
+
+ // topology contains arbitrary topology information associated with the
+ // endpoint. These key/value pairs must conform with the label format.
+ // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels
+ // Topology may include a maximum of 16 key/value pairs. This includes, but
+ // is not limited to the following well known keys:
+ // * kubernetes.io/hostname: the value indicates the hostname of the node
+ // where the endpoint is located. This should match the corresponding
+ // node label.
+ // * topology.kubernetes.io/zone: the value indicates the zone where the
+ // endpoint is located. This should match the corresponding node label.
+ // * topology.kubernetes.io/region: the value indicates the region where the
+ // endpoint is located. This should match the corresponding node label.
+ // This field is deprecated and will be removed in future api versions.
+ // +optional
+ Topology map[string]string `json:"topology,omitempty" protobuf:"bytes,5,opt,name=topology"`
+
+ // nodeName represents the name of the Node hosting this endpoint. This can
+ // be used to determine endpoints local to a Node.
+ // +optional
+ NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,6,opt,name=nodeName"`
+}
+
+// EndpointConditions represents the current condition of an endpoint.
+type EndpointConditions struct {
+ // ready indicates that this endpoint is prepared to receive traffic,
+ // according to whatever system is managing the endpoint. A nil value
+ // indicates an unknown state. In most cases consumers should interpret this
+ // unknown state as ready. For compatibility reasons, ready should never be
+ // "true" for terminating endpoints.
+ // +optional
+ Ready *bool `json:"ready,omitempty" protobuf:"bytes,1,name=ready"`
+
+ // serving is identical to ready except that it is set regardless of the
+ // terminating state of endpoints. This condition should be set to true for
+ // a ready endpoint that is terminating. If nil, consumers should defer to
+ // the ready condition.
+ // +optional
+ Serving *bool `json:"serving,omitempty" protobuf:"bytes,2,name=serving"`
+
+ // terminating indicates that this endpoint is terminating. A nil value
+ // indicates an unknown state. Consumers should interpret this unknown state
+ // to mean that the endpoint is not terminating.
+ // +optional
+ Terminating *bool `json:"terminating,omitempty" protobuf:"bytes,3,name=terminating"`
+}
+
+// EndpointHints provides hints describing how an endpoint should be consumed.
+type EndpointHints struct {
+ // forZones indicates the zone(s) this endpoint should be consumed by to
+ // enable topology aware routing. May contain a maximum of 8 entries.
+ // +listType=atomic
+ ForZones []ForZone `json:"forZones,omitempty" protobuf:"bytes,1,name=forZones"`
+}
+
+// ForZone provides information about which zones should consume this endpoint.
+type ForZone struct {
+ // name represents the name of the zone.
+ Name string `json:"name" protobuf:"bytes,1,name=name"`
+}
+
+// EndpointPort represents a Port used by an EndpointSlice
+type EndpointPort struct {
+ // name represents the name of this port. All ports in an EndpointSlice must have a unique name.
+ // If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name.
+ // Name must either be an empty string or pass DNS_LABEL validation:
+ // * must be no more than 63 characters long.
+ // * must consist of lower case alphanumeric characters or '-'.
+ // * must start and end with an alphanumeric character.
+ // Default is empty string.
+ Name *string `json:"name,omitempty" protobuf:"bytes,1,name=name"`
+
+ // protocol represents the IP protocol for this port.
+ // Must be UDP, TCP, or SCTP.
+ // Default is TCP.
+ Protocol *slim_corev1.Protocol `json:"protocol,omitempty" protobuf:"bytes,2,name=protocol"`
+
+ // port represents the port number of the endpoint.
+ // If this is not specified, ports are not restricted and must be
+ // interpreted in the context of the specific consumer.
+ Port *int32 `json:"port,omitempty" protobuf:"bytes,3,opt,name=port"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.16
+// +k8s:prerelease-lifecycle-gen:deprecated=1.21
+// +k8s:prerelease-lifecycle-gen:removed=1.25
+// +k8s:prerelease-lifecycle-gen:replacement=discovery.k8s.io,v1,EndpointSlice
+
+// EndpointSliceList represents a list of endpoint slices
+type EndpointSliceList struct {
+ slim_metav1.TypeMeta `json:",inline"`
+
+ // Standard list metadata.
+ // +optional
+ slim_metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // items is the list of endpoint slices
+ Items []EndpointSlice `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/well_known_labels.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/well_known_labels.go
new file mode 100644
index 000000000..40e19807e
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/well_known_labels.go
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Copyright 2019 The Kubernetes Authors.
+
+package v1beta1
+
+const (
+ // LabelServiceName is used to indicate the name of a Kubernetes service.
+ LabelServiceName = "kubernetes.io/service-name"
+)
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..cbfe0d129
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,220 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Endpoint) DeepCopyInto(out *Endpoint) {
+ *out = *in
+ if in.Addresses != nil {
+ in, out := &in.Addresses, &out.Addresses
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ in.Conditions.DeepCopyInto(&out.Conditions)
+ if in.Topology != nil {
+ in, out := &in.Topology, &out.Topology
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.NodeName != nil {
+ in, out := &in.NodeName, &out.NodeName
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint.
+func (in *Endpoint) DeepCopy() *Endpoint {
+ if in == nil {
+ return nil
+ }
+ out := new(Endpoint)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EndpointConditions) DeepCopyInto(out *EndpointConditions) {
+ *out = *in
+ if in.Ready != nil {
+ in, out := &in.Ready, &out.Ready
+ *out = new(bool)
+ **out = **in
+ }
+ if in.Serving != nil {
+ in, out := &in.Serving, &out.Serving
+ *out = new(bool)
+ **out = **in
+ }
+ if in.Terminating != nil {
+ in, out := &in.Terminating, &out.Terminating
+ *out = new(bool)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointConditions.
+func (in *EndpointConditions) DeepCopy() *EndpointConditions {
+ if in == nil {
+ return nil
+ }
+ out := new(EndpointConditions)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EndpointHints) DeepCopyInto(out *EndpointHints) {
+ *out = *in
+ if in.ForZones != nil {
+ in, out := &in.ForZones, &out.ForZones
+ *out = make([]ForZone, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointHints.
+func (in *EndpointHints) DeepCopy() *EndpointHints {
+ if in == nil {
+ return nil
+ }
+ out := new(EndpointHints)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EndpointPort) DeepCopyInto(out *EndpointPort) {
+ *out = *in
+ if in.Name != nil {
+ in, out := &in.Name, &out.Name
+ *out = new(string)
+ **out = **in
+ }
+ if in.Protocol != nil {
+ in, out := &in.Protocol, &out.Protocol
+ *out = new(v1.Protocol)
+ **out = **in
+ }
+ if in.Port != nil {
+ in, out := &in.Port, &out.Port
+ *out = new(int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPort.
+func (in *EndpointPort) DeepCopy() *EndpointPort {
+ if in == nil {
+ return nil
+ }
+ out := new(EndpointPort)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EndpointSlice) DeepCopyInto(out *EndpointSlice) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.Endpoints != nil {
+ in, out := &in.Endpoints, &out.Endpoints
+ *out = make([]Endpoint, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Ports != nil {
+ in, out := &in.Ports, &out.Ports
+ *out = make([]EndpointPort, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSlice.
+func (in *EndpointSlice) DeepCopy() *EndpointSlice {
+ if in == nil {
+ return nil
+ }
+ out := new(EndpointSlice)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EndpointSlice) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EndpointSliceList) DeepCopyInto(out *EndpointSliceList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]EndpointSlice, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSliceList.
+func (in *EndpointSliceList) DeepCopy() *EndpointSliceList {
+ if in == nil {
+ return nil
+ }
+ out := new(EndpointSliceList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EndpointSliceList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ForZone) DeepCopyInto(out *ForZone) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForZone.
+func (in *ForZone) DeepCopy() *ForZone {
+ if in == nil {
+ return nil
+ }
+ out := new(ForZone)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/zz_generated.deepequal.go
new file mode 100644
index 000000000..e25dc21f5
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1/zz_generated.deepequal.go
@@ -0,0 +1,268 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by deepequal-gen. DO NOT EDIT.
+
+package v1beta1
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *Endpoint) DeepEqual(other *Endpoint) bool {
+ if other == nil {
+ return false
+ }
+
+ if ((in.Addresses != nil) && (other.Addresses != nil)) || ((in.Addresses == nil) != (other.Addresses == nil)) {
+ in, other := &in.Addresses, &other.Addresses
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if inElement != (*other)[i] {
+ return false
+ }
+ }
+ }
+ }
+
+ if !in.Conditions.DeepEqual(&other.Conditions) {
+ return false
+ }
+
+ if ((in.Topology != nil) && (other.Topology != nil)) || ((in.Topology == nil) != (other.Topology == nil)) {
+ in, other := &in.Topology, &other.Topology
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for key, inValue := range *in {
+ if otherValue, present := (*other)[key]; !present {
+ return false
+ } else {
+ if inValue != otherValue {
+ return false
+ }
+ }
+ }
+ }
+ }
+
+ if (in.NodeName == nil) != (other.NodeName == nil) {
+ return false
+ } else if in.NodeName != nil {
+ if *in.NodeName != *other.NodeName {
+ return false
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *EndpointConditions) DeepEqual(other *EndpointConditions) bool {
+ if other == nil {
+ return false
+ }
+
+ if (in.Ready == nil) != (other.Ready == nil) {
+ return false
+ } else if in.Ready != nil {
+ if *in.Ready != *other.Ready {
+ return false
+ }
+ }
+
+ if (in.Serving == nil) != (other.Serving == nil) {
+ return false
+ } else if in.Serving != nil {
+ if *in.Serving != *other.Serving {
+ return false
+ }
+ }
+
+ if (in.Terminating == nil) != (other.Terminating == nil) {
+ return false
+ } else if in.Terminating != nil {
+ if *in.Terminating != *other.Terminating {
+ return false
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *EndpointHints) DeepEqual(other *EndpointHints) bool {
+ if other == nil {
+ return false
+ }
+
+ if ((in.ForZones != nil) && (other.ForZones != nil)) || ((in.ForZones == nil) != (other.ForZones == nil)) {
+ in, other := &in.ForZones, &other.ForZones
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *EndpointPort) DeepEqual(other *EndpointPort) bool {
+ if other == nil {
+ return false
+ }
+
+ if (in.Name == nil) != (other.Name == nil) {
+ return false
+ } else if in.Name != nil {
+ if *in.Name != *other.Name {
+ return false
+ }
+ }
+
+ if (in.Protocol == nil) != (other.Protocol == nil) {
+ return false
+ } else if in.Protocol != nil {
+ if *in.Protocol != *other.Protocol {
+ return false
+ }
+ }
+
+ if (in.Port == nil) != (other.Port == nil) {
+ return false
+ } else if in.Port != nil {
+ if *in.Port != *other.Port {
+ return false
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *EndpointSlice) DeepEqual(other *EndpointSlice) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.TypeMeta != other.TypeMeta {
+ return false
+ }
+
+ if !in.ObjectMeta.DeepEqual(&other.ObjectMeta) {
+ return false
+ }
+
+ if in.AddressType != other.AddressType {
+ return false
+ }
+ if ((in.Endpoints != nil) && (other.Endpoints != nil)) || ((in.Endpoints == nil) != (other.Endpoints == nil)) {
+ in, other := &in.Endpoints, &other.Endpoints
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ if ((in.Ports != nil) && (other.Ports != nil)) || ((in.Ports == nil) != (other.Ports == nil)) {
+ in, other := &in.Ports, &other.Ports
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *EndpointSliceList) DeepEqual(other *EndpointSliceList) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.TypeMeta != other.TypeMeta {
+ return false
+ }
+
+ if !in.ListMeta.DeepEqual(&other.ListMeta) {
+ return false
+ }
+
+ if ((in.Items != nil) && (other.Items != nil)) || ((in.Items == nil) != (other.Items == nil)) {
+ in, other := &in.Items, &other.Items
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *ForZone) DeepEqual(other *ForZone) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Name != other.Name {
+ return false
+ }
+
+ return true
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/doc.go
new file mode 100644
index 000000000..269888d36
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/doc.go
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// +k8s:deepcopy-gen=package
+// +k8s:protobuf-gen=package
+// +k8s:openapi-gen=true
+// +deepequal-gen=package
+// +groupName=networking.k8s.io
+
+// Package v1 contains slimmer versions of k8s networking types.
+package v1
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/generated.pb.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/generated.pb.go
new file mode 100644
index 000000000..d69850ce0
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/generated.pb.go
@@ -0,0 +1,6471 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/generated.proto
+
+package v1
+
+import (
+ fmt "fmt"
+
+ github_com_cilium_cilium_pkg_k8s_slim_k8s_api_core_v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
+ intstr "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr"
+
+ io "io"
+
+ proto "github.com/gogo/protobuf/proto"
+
+ math "math"
+ math_bits "math/bits"
+ reflect "reflect"
+ strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} }
+func (*HTTPIngressPath) ProtoMessage() {}
+func (*HTTPIngressPath) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5d3be2d57d520df2, []int{0}
+}
+func (m *HTTPIngressPath) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *HTTPIngressPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *HTTPIngressPath) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_HTTPIngressPath.Merge(m, src)
+}
+func (m *HTTPIngressPath) XXX_Size() int {
+ return m.Size()
+}
+func (m *HTTPIngressPath) XXX_DiscardUnknown() {
+ xxx_messageInfo_HTTPIngressPath.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HTTPIngressPath proto.InternalMessageInfo
+
+func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} }
+func (*HTTPIngressRuleValue) ProtoMessage() {}
+func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5d3be2d57d520df2, []int{1}
+}
+func (m *HTTPIngressRuleValue) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *HTTPIngressRuleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *HTTPIngressRuleValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_HTTPIngressRuleValue.Merge(m, src)
+}
+func (m *HTTPIngressRuleValue) XXX_Size() int {
+ return m.Size()
+}
+func (m *HTTPIngressRuleValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_HTTPIngressRuleValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HTTPIngressRuleValue proto.InternalMessageInfo
+
+func (m *IPBlock) Reset() { *m = IPBlock{} }
+func (*IPBlock) ProtoMessage() {}
+func (*IPBlock) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5d3be2d57d520df2, []int{2}
+}
+func (m *IPBlock) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *IPBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *IPBlock) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IPBlock.Merge(m, src)
+}
+func (m *IPBlock) XXX_Size() int {
+ return m.Size()
+}
+func (m *IPBlock) XXX_DiscardUnknown() {
+ xxx_messageInfo_IPBlock.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IPBlock proto.InternalMessageInfo
+
+func (m *Ingress) Reset() { *m = Ingress{} }
+func (*Ingress) ProtoMessage() {}
+func (*Ingress) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5d3be2d57d520df2, []int{3}
+}
+func (m *Ingress) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Ingress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Ingress) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Ingress.Merge(m, src)
+}
+func (m *Ingress) XXX_Size() int {
+ return m.Size()
+}
+func (m *Ingress) XXX_DiscardUnknown() {
+ xxx_messageInfo_Ingress.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Ingress proto.InternalMessageInfo
+
+func (m *IngressBackend) Reset() { *m = IngressBackend{} }
+func (*IngressBackend) ProtoMessage() {}
+func (*IngressBackend) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5d3be2d57d520df2, []int{4}
+}
+func (m *IngressBackend) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *IngressBackend) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *IngressBackend) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IngressBackend.Merge(m, src)
+}
+func (m *IngressBackend) XXX_Size() int {
+ return m.Size()
+}
+func (m *IngressBackend) XXX_DiscardUnknown() {
+ xxx_messageInfo_IngressBackend.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IngressBackend proto.InternalMessageInfo
+
+func (m *IngressClass) Reset() { *m = IngressClass{} }
+func (*IngressClass) ProtoMessage() {}
+func (*IngressClass) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5d3be2d57d520df2, []int{5}
+}
+func (m *IngressClass) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *IngressClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *IngressClass) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IngressClass.Merge(m, src)
+}
+func (m *IngressClass) XXX_Size() int {
+ return m.Size()
+}
+func (m *IngressClass) XXX_DiscardUnknown() {
+ xxx_messageInfo_IngressClass.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IngressClass proto.InternalMessageInfo
+
+func (m *IngressClassList) Reset() { *m = IngressClassList{} }
+func (*IngressClassList) ProtoMessage() {}
+func (*IngressClassList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5d3be2d57d520df2, []int{6}
+}
+func (m *IngressClassList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *IngressClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *IngressClassList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IngressClassList.Merge(m, src)
+}
+func (m *IngressClassList) XXX_Size() int {
+ return m.Size()
+}
+func (m *IngressClassList) XXX_DiscardUnknown() {
+ xxx_messageInfo_IngressClassList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IngressClassList proto.InternalMessageInfo
+
+func (m *IngressClassParametersReference) Reset() { *m = IngressClassParametersReference{} }
+func (*IngressClassParametersReference) ProtoMessage() {}
+func (*IngressClassParametersReference) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5d3be2d57d520df2, []int{7}
+}
+func (m *IngressClassParametersReference) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *IngressClassParametersReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *IngressClassParametersReference) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IngressClassParametersReference.Merge(m, src)
+}
+func (m *IngressClassParametersReference) XXX_Size() int {
+ return m.Size()
+}
+func (m *IngressClassParametersReference) XXX_DiscardUnknown() {
+ xxx_messageInfo_IngressClassParametersReference.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IngressClassParametersReference proto.InternalMessageInfo
+
+func (m *IngressClassSpec) Reset() { *m = IngressClassSpec{} }
+func (*IngressClassSpec) ProtoMessage() {}
+func (*IngressClassSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5d3be2d57d520df2, []int{8}
+}
+func (m *IngressClassSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *IngressClassSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *IngressClassSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IngressClassSpec.Merge(m, src)
+}
+func (m *IngressClassSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *IngressClassSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_IngressClassSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IngressClassSpec proto.InternalMessageInfo
+
+func (m *IngressList) Reset() { *m = IngressList{} }
+func (*IngressList) ProtoMessage() {}
+func (*IngressList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5d3be2d57d520df2, []int{9}
+}
+func (m *IngressList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *IngressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *IngressList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IngressList.Merge(m, src)
+}
+func (m *IngressList) XXX_Size() int {
+ return m.Size()
+}
+func (m *IngressList) XXX_DiscardUnknown() {
+ xxx_messageInfo_IngressList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IngressList proto.InternalMessageInfo
+
+func (m *IngressLoadBalancerIngress) Reset() { *m = IngressLoadBalancerIngress{} }
+func (*IngressLoadBalancerIngress) ProtoMessage() {}
+func (*IngressLoadBalancerIngress) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5d3be2d57d520df2, []int{10}
+}
+func (m *IngressLoadBalancerIngress) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *IngressLoadBalancerIngress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *IngressLoadBalancerIngress) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IngressLoadBalancerIngress.Merge(m, src)
+}
+func (m *IngressLoadBalancerIngress) XXX_Size() int {
+ return m.Size()
+}
+func (m *IngressLoadBalancerIngress) XXX_DiscardUnknown() {
+ xxx_messageInfo_IngressLoadBalancerIngress.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IngressLoadBalancerIngress proto.InternalMessageInfo
+
+func (m *IngressLoadBalancerStatus) Reset() { *m = IngressLoadBalancerStatus{} }
+func (*IngressLoadBalancerStatus) ProtoMessage() {}
+func (*IngressLoadBalancerStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5d3be2d57d520df2, []int{11}
+}
+func (m *IngressLoadBalancerStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *IngressLoadBalancerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *IngressLoadBalancerStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IngressLoadBalancerStatus.Merge(m, src)
+}
+func (m *IngressLoadBalancerStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *IngressLoadBalancerStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_IngressLoadBalancerStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IngressLoadBalancerStatus proto.InternalMessageInfo
+
+func (m *IngressPortStatus) Reset() { *m = IngressPortStatus{} }
+func (*IngressPortStatus) ProtoMessage() {}
+func (*IngressPortStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5d3be2d57d520df2, []int{12}
+}
+func (m *IngressPortStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *IngressPortStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *IngressPortStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IngressPortStatus.Merge(m, src)
+}
+func (m *IngressPortStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *IngressPortStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_IngressPortStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IngressPortStatus proto.InternalMessageInfo
+
+func (m *IngressRule) Reset() { *m = IngressRule{} }
+func (*IngressRule) ProtoMessage() {}
+func (*IngressRule) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5d3be2d57d520df2, []int{13}
+}
+func (m *IngressRule) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *IngressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *IngressRule) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IngressRule.Merge(m, src)
+}
+func (m *IngressRule) XXX_Size() int {
+ return m.Size()
+}
+func (m *IngressRule) XXX_DiscardUnknown() {
+ xxx_messageInfo_IngressRule.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IngressRule proto.InternalMessageInfo
+
+func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} }
+func (*IngressRuleValue) ProtoMessage() {}
+func (*IngressRuleValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5d3be2d57d520df2, []int{14}
+}
+func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *IngressRuleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *IngressRuleValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IngressRuleValue.Merge(m, src)
+}
+func (m *IngressRuleValue) XXX_Size() int {
+ return m.Size()
+}
+func (m *IngressRuleValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_IngressRuleValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo
+
+func (m *IngressServiceBackend) Reset() { *m = IngressServiceBackend{} }
+func (*IngressServiceBackend) ProtoMessage() {}
+func (*IngressServiceBackend) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5d3be2d57d520df2, []int{15}
+}
+func (m *IngressServiceBackend) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *IngressServiceBackend) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *IngressServiceBackend) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IngressServiceBackend.Merge(m, src)
+}
+func (m *IngressServiceBackend) XXX_Size() int {
+ return m.Size()
+}
+func (m *IngressServiceBackend) XXX_DiscardUnknown() {
+ xxx_messageInfo_IngressServiceBackend.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IngressServiceBackend proto.InternalMessageInfo
+
+func (m *IngressSpec) Reset() { *m = IngressSpec{} }
+func (*IngressSpec) ProtoMessage() {}
+func (*IngressSpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5d3be2d57d520df2, []int{16}
+}
+func (m *IngressSpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *IngressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *IngressSpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IngressSpec.Merge(m, src)
+}
+func (m *IngressSpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *IngressSpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_IngressSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IngressSpec proto.InternalMessageInfo
+
+func (m *IngressStatus) Reset() { *m = IngressStatus{} }
+func (*IngressStatus) ProtoMessage() {}
+func (*IngressStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5d3be2d57d520df2, []int{17}
+}
+func (m *IngressStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *IngressStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *IngressStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IngressStatus.Merge(m, src)
+}
+func (m *IngressStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *IngressStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_IngressStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IngressStatus proto.InternalMessageInfo
+
+func (m *IngressTLS) Reset() { *m = IngressTLS{} }
+func (*IngressTLS) ProtoMessage() {}
+func (*IngressTLS) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5d3be2d57d520df2, []int{18}
+}
+func (m *IngressTLS) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *IngressTLS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *IngressTLS) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IngressTLS.Merge(m, src)
+}
+func (m *IngressTLS) XXX_Size() int {
+ return m.Size()
+}
+func (m *IngressTLS) XXX_DiscardUnknown() {
+ xxx_messageInfo_IngressTLS.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IngressTLS proto.InternalMessageInfo
+
+func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} }
+func (*NetworkPolicy) ProtoMessage() {}
+func (*NetworkPolicy) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5d3be2d57d520df2, []int{19}
+}
+func (m *NetworkPolicy) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *NetworkPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *NetworkPolicy) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NetworkPolicy.Merge(m, src)
+}
+func (m *NetworkPolicy) XXX_Size() int {
+ return m.Size()
+}
+func (m *NetworkPolicy) XXX_DiscardUnknown() {
+ xxx_messageInfo_NetworkPolicy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NetworkPolicy proto.InternalMessageInfo
+
+func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} }
+func (*NetworkPolicyEgressRule) ProtoMessage() {}
+func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5d3be2d57d520df2, []int{20}
+}
+func (m *NetworkPolicyEgressRule) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *NetworkPolicyEgressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *NetworkPolicyEgressRule) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NetworkPolicyEgressRule.Merge(m, src)
+}
+func (m *NetworkPolicyEgressRule) XXX_Size() int {
+ return m.Size()
+}
+func (m *NetworkPolicyEgressRule) XXX_DiscardUnknown() {
+ xxx_messageInfo_NetworkPolicyEgressRule.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NetworkPolicyEgressRule proto.InternalMessageInfo
+
+func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} }
+func (*NetworkPolicyIngressRule) ProtoMessage() {}
+func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5d3be2d57d520df2, []int{21}
+}
+func (m *NetworkPolicyIngressRule) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *NetworkPolicyIngressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *NetworkPolicyIngressRule) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NetworkPolicyIngressRule.Merge(m, src)
+}
+func (m *NetworkPolicyIngressRule) XXX_Size() int {
+ return m.Size()
+}
+func (m *NetworkPolicyIngressRule) XXX_DiscardUnknown() {
+ xxx_messageInfo_NetworkPolicyIngressRule.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NetworkPolicyIngressRule proto.InternalMessageInfo
+
+func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} }
+func (*NetworkPolicyList) ProtoMessage() {}
+func (*NetworkPolicyList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5d3be2d57d520df2, []int{22}
+}
+func (m *NetworkPolicyList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *NetworkPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *NetworkPolicyList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NetworkPolicyList.Merge(m, src)
+}
+func (m *NetworkPolicyList) XXX_Size() int {
+ return m.Size()
+}
+func (m *NetworkPolicyList) XXX_DiscardUnknown() {
+ xxx_messageInfo_NetworkPolicyList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NetworkPolicyList proto.InternalMessageInfo
+
+func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} }
+func (*NetworkPolicyPeer) ProtoMessage() {}
+func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5d3be2d57d520df2, []int{23}
+}
+func (m *NetworkPolicyPeer) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *NetworkPolicyPeer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *NetworkPolicyPeer) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NetworkPolicyPeer.Merge(m, src)
+}
+func (m *NetworkPolicyPeer) XXX_Size() int {
+ return m.Size()
+}
+func (m *NetworkPolicyPeer) XXX_DiscardUnknown() {
+ xxx_messageInfo_NetworkPolicyPeer.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NetworkPolicyPeer proto.InternalMessageInfo
+
+func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} }
+func (*NetworkPolicyPort) ProtoMessage() {}
+func (*NetworkPolicyPort) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5d3be2d57d520df2, []int{24}
+}
+func (m *NetworkPolicyPort) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *NetworkPolicyPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *NetworkPolicyPort) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NetworkPolicyPort.Merge(m, src)
+}
+func (m *NetworkPolicyPort) XXX_Size() int {
+ return m.Size()
+}
+func (m *NetworkPolicyPort) XXX_DiscardUnknown() {
+ xxx_messageInfo_NetworkPolicyPort.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NetworkPolicyPort proto.InternalMessageInfo
+
+func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} }
+func (*NetworkPolicySpec) ProtoMessage() {}
+func (*NetworkPolicySpec) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5d3be2d57d520df2, []int{25}
+}
+func (m *NetworkPolicySpec) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *NetworkPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *NetworkPolicySpec) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_NetworkPolicySpec.Merge(m, src)
+}
+func (m *NetworkPolicySpec) XXX_Size() int {
+ return m.Size()
+}
+func (m *NetworkPolicySpec) XXX_DiscardUnknown() {
+ xxx_messageInfo_NetworkPolicySpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NetworkPolicySpec proto.InternalMessageInfo
+
+func (m *ServiceBackendPort) Reset() { *m = ServiceBackendPort{} }
+func (*ServiceBackendPort) ProtoMessage() {}
+func (*ServiceBackendPort) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5d3be2d57d520df2, []int{26}
+}
+func (m *ServiceBackendPort) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ServiceBackendPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *ServiceBackendPort) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ServiceBackendPort.Merge(m, src)
+}
+func (m *ServiceBackendPort) XXX_Size() int {
+ return m.Size()
+}
+func (m *ServiceBackendPort) XXX_DiscardUnknown() {
+ xxx_messageInfo_ServiceBackendPort.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServiceBackendPort proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*HTTPIngressPath)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.HTTPIngressPath")
+ proto.RegisterType((*HTTPIngressRuleValue)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.HTTPIngressRuleValue")
+ proto.RegisterType((*IPBlock)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.IPBlock")
+ proto.RegisterType((*Ingress)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.Ingress")
+ proto.RegisterType((*IngressBackend)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.IngressBackend")
+ proto.RegisterType((*IngressClass)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.IngressClass")
+ proto.RegisterType((*IngressClassList)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.IngressClassList")
+ proto.RegisterType((*IngressClassParametersReference)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.IngressClassParametersReference")
+ proto.RegisterType((*IngressClassSpec)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.IngressClassSpec")
+ proto.RegisterType((*IngressList)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.IngressList")
+ proto.RegisterType((*IngressLoadBalancerIngress)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.IngressLoadBalancerIngress")
+ proto.RegisterType((*IngressLoadBalancerStatus)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.IngressLoadBalancerStatus")
+ proto.RegisterType((*IngressPortStatus)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.IngressPortStatus")
+ proto.RegisterType((*IngressRule)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.IngressRule")
+ proto.RegisterType((*IngressRuleValue)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.IngressRuleValue")
+ proto.RegisterType((*IngressServiceBackend)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.IngressServiceBackend")
+ proto.RegisterType((*IngressSpec)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.IngressSpec")
+ proto.RegisterType((*IngressStatus)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.IngressStatus")
+ proto.RegisterType((*IngressTLS)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.IngressTLS")
+ proto.RegisterType((*NetworkPolicy)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.NetworkPolicy")
+ proto.RegisterType((*NetworkPolicyEgressRule)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.NetworkPolicyEgressRule")
+ proto.RegisterType((*NetworkPolicyIngressRule)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.NetworkPolicyIngressRule")
+ proto.RegisterType((*NetworkPolicyList)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.NetworkPolicyList")
+ proto.RegisterType((*NetworkPolicyPeer)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.NetworkPolicyPeer")
+ proto.RegisterType((*NetworkPolicyPort)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.NetworkPolicyPort")
+ proto.RegisterType((*NetworkPolicySpec)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.NetworkPolicySpec")
+ proto.RegisterType((*ServiceBackendPort)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1.ServiceBackendPort")
+}
+
+func init() {
+ proto.RegisterFile("github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/generated.proto", fileDescriptor_5d3be2d57d520df2)
+}
+
+var fileDescriptor_5d3be2d57d520df2 = []byte{
+ // 1671 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x6b, 0x1b, 0x57,
+ 0x16, 0xf7, 0x95, 0x2c, 0xcb, 0xbe, 0xb2, 0x1d, 0xfb, 0x6e, 0xc2, 0x6a, 0x0d, 0x2b, 0x99, 0x81,
+ 0x5d, 0x0c, 0xbb, 0x2b, 0x11, 0xb3, 0x2c, 0x81, 0xc0, 0x6e, 0x76, 0x12, 0x27, 0x51, 0xe2, 0xc8,
+ 0xda, 0x6b, 0x6d, 0x16, 0x96, 0x52, 0x18, 0x8f, 0xae, 0xe5, 0xb1, 0x46, 0x73, 0x27, 0x77, 0xae,
+ 0xdc, 0x18, 0xf2, 0x90, 0xb6, 0x0f, 0xa1, 0x5f, 0x10, 0xe8, 0x3f, 0x50, 0xfa, 0xdc, 0x87, 0xfe,
+ 0x13, 0x2d, 0x79, 0x0c, 0x94, 0x42, 0xa0, 0x44, 0xd4, 0xea, 0x4b, 0x9f, 0xfb, 0xf1, 0xe2, 0xa7,
+ 0x72, 0x3f, 0xe6, 0x43, 0x1f, 0xa6, 0x75, 0x3c, 0xc1, 0x4f, 0x9a, 0x7b, 0xce, 0x99, 0xf3, 0x3b,
+ 0xe7, 0xdc, 0x33, 0xe7, 0xfe, 0x66, 0x04, 0xef, 0xb6, 0x1d, 0xbe, 0xd7, 0xdb, 0xa9, 0xd8, 0xb4,
+ 0x5b, 0xb5, 0x1d, 0xd7, 0xe9, 0x45, 0x3f, 0x7e, 0xa7, 0x5d, 0xed, 0x5c, 0x09, 0xaa, 0x81, 0xeb,
+ 0x74, 0xe5, 0x85, 0xe5, 0x3b, 0x55, 0x8f, 0xf0, 0xb7, 0x28, 0xeb, 0x38, 0x5e, 0xbb, 0x7a, 0x70,
+ 0xb9, 0xda, 0x26, 0x1e, 0x61, 0x16, 0x27, 0xad, 0x8a, 0xcf, 0x28, 0xa7, 0xe8, 0x6a, 0xec, 0xac,
+ 0xa2, 0xbc, 0x84, 0x3f, 0x7e, 0xa7, 0x5d, 0xe9, 0x5c, 0x09, 0x2a, 0xc2, 0x99, 0xbc, 0xb0, 0x7c,
+ 0xa7, 0x12, 0x3b, 0xab, 0x1c, 0x5c, 0x5e, 0xd9, 0x38, 0x5d, 0x24, 0x36, 0x65, 0x64, 0x42, 0x0c,
+ 0x2b, 0x37, 0x4f, 0xe5, 0x26, 0xa8, 0x76, 0x09, 0xb7, 0x26, 0xf9, 0xb9, 0x73, 0x4a, 0x3f, 0x3d,
+ 0xee, 0xb8, 0x55, 0xc7, 0xe3, 0x01, 0x67, 0x63, 0xbe, 0xfe, 0x96, 0xf0, 0xd5, 0xa6, 0x6d, 0x5a,
+ 0x95, 0xe2, 0x9d, 0xde, 0xae, 0x5c, 0xc9, 0x85, 0xbc, 0xd2, 0xe6, 0xc2, 0x69, 0xc5, 0xa1, 0xc2,
+ 0x6f, 0xd7, 0xb2, 0xf7, 0x1c, 0x8f, 0xb0, 0x43, 0x89, 0xca, 0x7a, 0x1e, 0x77, 0xba, 0x64, 0xcc,
+ 0xff, 0x3f, 0x7e, 0xed, 0x86, 0xc0, 0xde, 0x23, 0x5d, 0x6b, 0xf4, 0x3e, 0xe3, 0x08, 0xc0, 0x0b,
+ 0xb7, 0x9b, 0xcd, 0x46, 0xcd, 0x6b, 0x33, 0x12, 0x04, 0x0d, 0x8b, 0xef, 0xa1, 0x55, 0x38, 0xed,
+ 0x5b, 0x7c, 0xaf, 0x08, 0x56, 0xc1, 0xda, 0x9c, 0x39, 0xff, 0xac, 0x5f, 0x9e, 0x1a, 0xf4, 0xcb,
+ 0xd3, 0x42, 0x87, 0xa5, 0x06, 0xfd, 0x1d, 0xce, 0x8a, 0xdf, 0xe6, 0xa1, 0x4f, 0x8a, 0x59, 0x69,
+ 0x55, 0x1c, 0xf4, 0xcb, 0xb3, 0x0d, 0x2d, 0x3b, 0x4e, 0x5c, 0xe3, 0xc8, 0x12, 0x1d, 0xc0, 0xfc,
+ 0x8e, 0x65, 0x77, 0x88, 0xd7, 0x2a, 0x66, 0x56, 0xc1, 0x5a, 0x61, 0xfd, 0x6e, 0xe5, 0x0c, 0xdd,
+ 0x52, 0xd1, 0x21, 0x9b, 0xca, 0xa5, 0x79, 0x41, 0xc7, 0x99, 0xd7, 0x02, 0x1c, 0x82, 0x19, 0xef,
+ 0x01, 0x78, 0x31, 0x91, 0x23, 0xee, 0xb9, 0xe4, 0xbe, 0xe5, 0xf6, 0x08, 0x7a, 0x00, 0x73, 0x22,
+ 0xb8, 0xa0, 0x08, 0x56, 0xb3, 0x6b, 0x85, 0xf5, 0xcd, 0x33, 0x85, 0x33, 0x52, 0x45, 0x73, 0x41,
+ 0xc7, 0x93, 0x13, 0xab, 0x00, 0x2b, 0x24, 0x63, 0x0b, 0xe6, 0x6b, 0x0d, 0xd3, 0xa5, 0x76, 0x47,
+ 0x94, 0xd9, 0x76, 0x5a, 0x6c, 0xb4, 0xcc, 0xd7, 0x6b, 0x37, 0x30, 0x96, 0x1a, 0x64, 0xc0, 0x19,
+ 0xf2, 0xd0, 0x26, 0x3e, 0x2f, 0x66, 0x56, 0xb3, 0x6b, 0x73, 0x26, 0x1c, 0xf4, 0xcb, 0x33, 0x1b,
+ 0x52, 0x82, 0xb5, 0xc6, 0x78, 0x99, 0x81, 0x79, 0x0d, 0x8b, 0x18, 0x9c, 0x15, 0xbd, 0xdc, 0xb2,
+ 0xb8, 0x25, 0xbd, 0x16, 0xd6, 0xcd, 0xd3, 0xa5, 0x14, 0x54, 0xc4, 0xfd, 0x22, 0x9b, 0xad, 0x9d,
+ 0x7d, 0x62, 0xf3, 0x7b, 0x84, 0x5b, 0x26, 0xd2, 0x91, 0xc1, 0x58, 0x86, 0x23, 0x1c, 0xb4, 0x0f,
+ 0xa7, 0x03, 0x9f, 0xd8, 0x7a, 0x47, 0x6f, 0xa7, 0xb1, 0xa3, 0xdb, 0x3e, 0xb1, 0xe3, 0x7a, 0x88,
+ 0x15, 0x96, 0x18, 0x88, 0xc1, 0x99, 0x80, 0x5b, 0xbc, 0x17, 0xc8, 0xa6, 0x2b, 0xac, 0xdf, 0x49,
+ 0x05, 0x4d, 0x7a, 0x34, 0x17, 0x35, 0xde, 0x8c, 0x5a, 0x63, 0x8d, 0x64, 0x7c, 0x00, 0xe0, 0xe2,
+ 0x70, 0xa7, 0xa1, 0x43, 0x98, 0x0f, 0x08, 0x3b, 0x70, 0x6c, 0x52, 0x9c, 0x96, 0x71, 0xe0, 0x54,
+ 0xe2, 0x50, 0x2e, 0xc3, 0x76, 0x2e, 0x88, 0x56, 0xd6, 0x32, 0x1c, 0xe2, 0x19, 0x3f, 0x02, 0x38,
+ 0xaf, 0xed, 0xaf, 0xbb, 0xd6, 0x39, 0x6d, 0x39, 0x1d, 0xda, 0xf2, 0x7b, 0x69, 0x24, 0x2f, 0x93,
+ 0x39, 0x69, 0xdf, 0x8d, 0x1f, 0x00, 0x5c, 0x4a, 0x1a, 0x6e, 0x3a, 0x01, 0x47, 0xde, 0x58, 0xe6,
+ 0xd7, 0x5e, 0x35, 0x73, 0xe1, 0x4f, 0xe6, 0xbd, 0xa4, 0xc1, 0x67, 0x43, 0x49, 0x22, 0x6b, 0x0f,
+ 0xe6, 0x1c, 0x4e, 0xba, 0x81, 0x7c, 0x16, 0x0b, 0xeb, 0xb5, 0xd4, 0xd2, 0x8e, 0x27, 0x45, 0x4d,
+ 0xf8, 0xc7, 0x0a, 0xc6, 0x78, 0x09, 0x60, 0x39, 0x69, 0xd6, 0xb0, 0x98, 0xd5, 0x25, 0x9c, 0xb0,
+ 0x00, 0x93, 0x5d, 0xc2, 0x88, 0x67, 0x13, 0xb4, 0x06, 0x67, 0xad, 0x46, 0xed, 0x16, 0xa3, 0x3d,
+ 0x3f, 0x1c, 0x23, 0x22, 0xfa, 0x7f, 0x6b, 0x19, 0x8e, 0xb4, 0x62, 0xd8, 0x74, 0x1c, 0x3d, 0x78,
+ 0x13, 0xc3, 0xe6, 0xae, 0xe3, 0xb5, 0xb0, 0xd4, 0x08, 0x0b, 0xcf, 0xea, 0x86, 0xf3, 0x3c, 0xb2,
+ 0xa8, 0x5b, 0x5d, 0x82, 0xa5, 0x06, 0x95, 0x61, 0x2e, 0xb0, 0xa9, 0xaf, 0xba, 0x7e, 0xce, 0x9c,
+ 0x13, 0x21, 0x6f, 0x0b, 0x01, 0x56, 0x72, 0xf4, 0x17, 0x38, 0x27, 0x0c, 0x03, 0xdf, 0xb2, 0x49,
+ 0x31, 0x27, 0x8d, 0x16, 0x06, 0xfd, 0xf2, 0x5c, 0x3d, 0x14, 0xe2, 0x58, 0x6f, 0x7c, 0x3d, 0xb2,
+ 0xa9, 0x62, 0xbf, 0xd1, 0x3a, 0x84, 0x36, 0xf5, 0x38, 0xa3, 0xae, 0x4b, 0xc2, 0xc9, 0x18, 0x35,
+ 0xe3, 0xf5, 0x48, 0x83, 0x13, 0x56, 0xe8, 0x43, 0x00, 0xa1, 0x1f, 0x15, 0x47, 0x77, 0xe5, 0x1b,
+ 0xa9, 0x6d, 0xcf, 0x84, 0xba, 0x9b, 0x8b, 0x22, 0x9c, 0x84, 0x22, 0x81, 0x6f, 0x7c, 0x0f, 0x60,
+ 0x41, 0xdf, 0x7f, 0x2e, 0x7d, 0xea, 0x0c, 0xf7, 0xe9, 0x8d, 0x54, 0xce, 0xd8, 0xc9, 0x2d, 0xfa,
+ 0x0d, 0x80, 0x2b, 0x61, 0xaa, 0xd4, 0x6a, 0x99, 0x96, 0x6b, 0x79, 0x36, 0x61, 0xe1, 0x71, 0xb4,
+ 0x02, 0x33, 0x4e, 0xd8, 0x97, 0x50, 0x3b, 0xc8, 0xd4, 0x1a, 0x38, 0xe3, 0xf8, 0xe8, 0xaf, 0x70,
+ 0x76, 0x8f, 0x06, 0x5c, 0x76, 0x9c, 0xea, 0xc9, 0x28, 0xa7, 0xdb, 0x5a, 0x8e, 0x23, 0x0b, 0x14,
+ 0xc0, 0x9c, 0x4f, 0x19, 0x0f, 0x8a, 0xd3, 0x32, 0xa7, 0x7a, 0x1a, 0x39, 0x35, 0x28, 0xe3, 0x7a,
+ 0xf6, 0xc7, 0x47, 0xb5, 0x00, 0xc1, 0x0a, 0xcb, 0xf8, 0x04, 0xc0, 0x3f, 0x4c, 0xc8, 0x4e, 0xdd,
+ 0x83, 0xde, 0x01, 0x30, 0xef, 0x28, 0xad, 0xa6, 0x0f, 0xff, 0x4b, 0x23, 0xaa, 0x09, 0x75, 0x8c,
+ 0x99, 0x4d, 0x48, 0x60, 0x42, 0x60, 0xe3, 0x4b, 0x00, 0x97, 0xc7, 0xd2, 0x91, 0xfc, 0x8d, 0x32,
+ 0x2e, 0x2b, 0x9f, 0x4b, 0xf0, 0x37, 0xca, 0x38, 0x96, 0x1a, 0xd1, 0x93, 0x92, 0xfe, 0xd9, 0xd4,
+ 0xd5, 0xd5, 0xc7, 0x61, 0xf5, 0x1b, 0x5a, 0x7e, 0xdc, 0x2f, 0xff, 0xf3, 0x95, 0xe8, 0x78, 0x25,
+ 0xf4, 0x80, 0x23, 0x0c, 0x31, 0x39, 0x08, 0x63, 0x94, 0xe9, 0xe1, 0x22, 0x27, 0xc7, 0x86, 0x10,
+ 0x60, 0x25, 0x37, 0xbe, 0x88, 0x1f, 0x1a, 0x41, 0xcf, 0x44, 0x0a, 0x62, 0xf3, 0x47, 0xb9, 0x91,
+ 0x68, 0x0d, 0x2c, 0x35, 0xe8, 0x63, 0x00, 0x97, 0x9c, 0x11, 0x42, 0x97, 0xe6, 0x89, 0x14, 0x39,
+ 0x35, 0x8b, 0x1a, 0x7d, 0x69, 0x54, 0x83, 0xc7, 0x02, 0x30, 0xde, 0x8d, 0x87, 0x5a, 0x4c, 0x33,
+ 0x29, 0x9c, 0xde, 0xe3, 0xdc, 0xd7, 0x4f, 0xff, 0x7f, 0xd2, 0x62, 0x99, 0x71, 0x84, 0xb3, 0xb2,
+ 0x36, 0xcd, 0x66, 0x03, 0x4b, 0x20, 0xe3, 0x33, 0x00, 0x2f, 0x4d, 0x64, 0x15, 0xd1, 0x90, 0x07,
+ 0x27, 0x0e, 0xf9, 0x07, 0xba, 0x79, 0x54, 0x29, 0xb7, 0xce, 0x14, 0xec, 0x30, 0xb8, 0xe8, 0xbf,
+ 0x49, 0xdd, 0x68, 0x7c, 0x9e, 0x8d, 0x36, 0x5f, 0x1e, 0x02, 0xd7, 0xa2, 0x9d, 0x95, 0x03, 0x58,
+ 0x04, 0xa7, 0x8f, 0x9c, 0x8b, 0x89, 0x6d, 0x88, 0x74, 0x78, 0xcc, 0x1a, 0x3d, 0x01, 0x70, 0xb1,
+ 0x45, 0x76, 0xad, 0x9e, 0xcb, 0x35, 0xb8, 0x2e, 0x7e, 0xaa, 0x6f, 0x1c, 0x68, 0xd0, 0x2f, 0x2f,
+ 0xde, 0x18, 0x82, 0xc1, 0x23, 0xb0, 0x68, 0x17, 0x66, 0xb9, 0x1b, 0xce, 0xe2, 0x5b, 0x69, 0xa0,
+ 0x37, 0x37, 0xb7, 0xcd, 0x82, 0xae, 0x62, 0xb6, 0xb9, 0xb9, 0x8d, 0x05, 0x00, 0xea, 0xc2, 0x1c,
+ 0xeb, 0xb9, 0x44, 0x30, 0xe3, 0x6c, 0x5a, 0x3c, 0x5c, 0x34, 0x58, 0x3c, 0x1b, 0xc5, 0x2a, 0xc0,
+ 0x0a, 0xc5, 0xf8, 0x14, 0xc0, 0x85, 0x21, 0xfe, 0x8c, 0x9e, 0x02, 0x38, 0xef, 0x26, 0x86, 0x97,
+ 0x2e, 0xf8, 0xfd, 0xb4, 0x87, 0xa2, 0x1e, 0xd9, 0x17, 0x75, 0x58, 0xf3, 0x49, 0x1d, 0x1e, 0x8a,
+ 0xc0, 0xb0, 0x20, 0x8c, 0x6b, 0x26, 0x66, 0x90, 0x18, 0x1c, 0x6a, 0x5a, 0xeb, 0x19, 0x24, 0xe6,
+ 0x49, 0x80, 0x95, 0x5c, 0x70, 0x8f, 0x80, 0xd8, 0x8c, 0xf0, 0x7a, 0x7c, 0x28, 0x45, 0xdc, 0x63,
+ 0x3b, 0xd2, 0xe0, 0x84, 0x95, 0xf1, 0x33, 0x80, 0x0b, 0x75, 0x15, 0x72, 0x83, 0xba, 0x8e, 0x7d,
+ 0x78, 0x2e, 0x84, 0xdc, 0x1f, 0x22, 0xe4, 0x67, 0x3b, 0x1d, 0x87, 0xb2, 0x39, 0x91, 0x91, 0x1f,
+ 0x03, 0xf8, 0xfb, 0x21, 0xcb, 0x8d, 0x78, 0x76, 0x47, 0x87, 0x35, 0x48, 0xe1, 0xb0, 0x1e, 0x02,
+ 0x91, 0x13, 0x64, 0xe2, 0x61, 0x8d, 0xf6, 0x61, 0x86, 0x53, 0xfd, 0x98, 0xa5, 0x89, 0x48, 0x08,
+ 0x8b, 0xb9, 0x4b, 0x93, 0xe2, 0x0c, 0xa7, 0xc6, 0xdb, 0x19, 0x58, 0x1c, 0xb2, 0x4a, 0x9e, 0x5c,
+ 0xe7, 0x92, 0xbd, 0x0f, 0xa7, 0x77, 0x19, 0xed, 0xbe, 0xa6, 0xfc, 0xa3, 0x06, 0xb8, 0xc9, 0x68,
+ 0x17, 0x4b, 0x24, 0xe3, 0x27, 0x00, 0x97, 0x87, 0x2c, 0xcf, 0x85, 0xeb, 0xd2, 0x61, 0xae, 0x7b,
+ 0x27, 0xbd, 0xc4, 0x4f, 0x60, 0xbc, 0x4f, 0xb2, 0x23, 0x69, 0x8b, 0x02, 0xa1, 0x87, 0xb0, 0xe0,
+ 0xd3, 0xd6, 0x36, 0x71, 0x89, 0xcd, 0x69, 0x38, 0xf9, 0x36, 0x5e, 0x39, 0x73, 0x6b, 0x87, 0xb8,
+ 0xa1, 0x33, 0xf3, 0xc2, 0xa0, 0x5f, 0x2e, 0x34, 0x62, 0xef, 0x38, 0x09, 0x85, 0xde, 0x07, 0x70,
+ 0x39, 0x7a, 0xa5, 0x8a, 0x02, 0xc8, 0xa4, 0x19, 0xc0, 0xa5, 0x41, 0xbf, 0xbc, 0x5c, 0x1f, 0xc5,
+ 0xc0, 0xe3, 0xb0, 0xa8, 0x03, 0xf3, 0x8e, 0x2f, 0xbf, 0x6d, 0xe9, 0xef, 0x33, 0x67, 0x7c, 0xf7,
+ 0x50, 0xdf, 0xc9, 0xd4, 0x97, 0x10, 0xbd, 0xc0, 0x21, 0x82, 0xf1, 0x51, 0x66, 0x74, 0x27, 0x04,
+ 0xb1, 0xdd, 0x4f, 0x10, 0x5b, 0xc5, 0x71, 0xea, 0xaf, 0x8d, 0xd4, 0x92, 0x21, 0xa6, 0x74, 0xeb,
+ 0xb4, 0xd5, 0xee, 0x71, 0xc7, 0xad, 0xa8, 0xaf, 0xc5, 0x95, 0x9a, 0xc7, 0xb7, 0xd8, 0x36, 0x67,
+ 0x8e, 0xd7, 0x56, 0x64, 0x2e, 0xc1, 0xd5, 0xff, 0x04, 0xf3, 0x9a, 0x3c, 0xc9, 0xaa, 0xe6, 0x54,
+ 0x3d, 0x36, 0x94, 0x08, 0x87, 0x3a, 0xe3, 0xab, 0xd1, 0xce, 0x94, 0x54, 0xea, 0xd1, 0x6b, 0xec,
+ 0xcc, 0xdf, 0xe9, 0x27, 0xe4, 0xe4, 0xee, 0x7c, 0x9c, 0x78, 0x47, 0x52, 0x4f, 0xe8, 0x7f, 0xd3,
+ 0x7b, 0x42, 0x93, 0x24, 0xe5, 0xc4, 0x37, 0x24, 0xf4, 0x08, 0xce, 0x10, 0x15, 0x80, 0x22, 0x46,
+ 0xcd, 0xf4, 0x02, 0x88, 0x8f, 0xbc, 0xf8, 0xe3, 0xa1, 0x96, 0x69, 0x4c, 0xf4, 0x2f, 0x51, 0x7e,
+ 0x61, 0xdb, 0x3c, 0xf4, 0x89, 0x7a, 0x7b, 0x9d, 0x33, 0xff, 0xa8, 0x6a, 0x16, 0x89, 0x8f, 0xfb,
+ 0x65, 0x18, 0x2f, 0x71, 0xf2, 0x0e, 0xe3, 0x4d, 0x88, 0xc6, 0x49, 0xf4, 0x6f, 0x60, 0xf1, 0x7f,
+ 0x86, 0x33, 0x5e, 0xaf, 0xbb, 0x43, 0xd4, 0x2c, 0xc8, 0xc5, 0x01, 0xd6, 0xa5, 0x14, 0x6b, 0xad,
+ 0x69, 0x3d, 0x3b, 0x2a, 0x4d, 0x3d, 0x3f, 0x2a, 0x4d, 0xbd, 0x38, 0x2a, 0x4d, 0x3d, 0x1e, 0x94,
+ 0xc0, 0xb3, 0x41, 0x09, 0x3c, 0x1f, 0x94, 0xc0, 0x8b, 0x41, 0x09, 0x7c, 0x3b, 0x28, 0x81, 0xa7,
+ 0xdf, 0x95, 0xa6, 0xfe, 0x7f, 0xf5, 0x0c, 0xff, 0x10, 0xfd, 0x12, 0x00, 0x00, 0xff, 0xff, 0x10,
+ 0x85, 0x0f, 0x5a, 0x5f, 0x1a, 0x00, 0x00,
+}
+
+func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *HTTPIngressPath) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HTTPIngressPath) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.PathType != nil {
+ i -= len(*m.PathType)
+ copy(dAtA[i:], *m.PathType)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PathType)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ {
+ size, err := m.Backend.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Path)
+ copy(dAtA[i:], m.Path)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *HTTPIngressRuleValue) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *HTTPIngressRuleValue) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HTTPIngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Paths) > 0 {
+ for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Paths[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *IPBlock) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *IPBlock) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *IPBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Except) > 0 {
+ for iNdEx := len(m.Except) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Except[iNdEx])
+ copy(dAtA[i:], m.Except[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Except[iNdEx])))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ i -= len(m.CIDR)
+ copy(dAtA[i:], m.CIDR)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *Ingress) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Ingress) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *IngressBackend) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Service != nil {
+ {
+ size, err := m.Service.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *IngressClass) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *IngressClass) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *IngressClass) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *IngressClassList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *IngressClassList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *IngressClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *IngressClassParametersReference) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *IngressClassParametersReference) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *IngressClassParametersReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Namespace != nil {
+ i -= len(*m.Namespace)
+ copy(dAtA[i:], *m.Namespace)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Namespace)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.Scope != nil {
+ i -= len(*m.Scope)
+ copy(dAtA[i:], *m.Scope)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Scope)))
+ i--
+ dAtA[i] = 0x22
+ }
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Kind)
+ copy(dAtA[i:], m.Kind)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
+ i--
+ dAtA[i] = 0x12
+ if m.APIGroup != nil {
+ i -= len(*m.APIGroup)
+ copy(dAtA[i:], *m.APIGroup)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.APIGroup)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *IngressClassSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *IngressClassSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *IngressClassSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Parameters != nil {
+ {
+ size, err := m.Parameters.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(m.Controller)
+ copy(dAtA[i:], m.Controller)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Controller)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *IngressList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *IngressList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *IngressList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *IngressLoadBalancerIngress) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *IngressLoadBalancerIngress) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *IngressLoadBalancerIngress) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Ports) > 0 {
+ for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ i -= len(m.Hostname)
+ copy(dAtA[i:], m.Hostname)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.IP)
+ copy(dAtA[i:], m.IP)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *IngressLoadBalancerStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *IngressLoadBalancerStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *IngressLoadBalancerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Ingress) > 0 {
+ for iNdEx := len(m.Ingress) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Ingress[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *IngressPortStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *IngressPortStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *IngressPortStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Error != nil {
+ i -= len(*m.Error)
+ copy(dAtA[i:], *m.Error)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Error)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ i -= len(m.Protocol)
+ copy(dAtA[i:], m.Protocol)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol)))
+ i--
+ dAtA[i] = 0x12
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Port))
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *IngressRule) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *IngressRule) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *IngressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.IngressRuleValue.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Host)
+ copy(dAtA[i:], m.Host)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *IngressRuleValue) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *IngressRuleValue) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *IngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.HTTP != nil {
+ {
+ size, err := m.HTTP.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *IngressServiceBackend) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *IngressServiceBackend) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *IngressServiceBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Port.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *IngressSpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *IngressSpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *IngressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.IngressClassName != nil {
+ i -= len(*m.IngressClassName)
+ copy(dAtA[i:], *m.IngressClassName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IngressClassName)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.Rules) > 0 {
+ for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.TLS) > 0 {
+ for iNdEx := len(m.TLS) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.TLS[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if m.DefaultBackend != nil {
+ {
+ size, err := m.DefaultBackend.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *IngressStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *IngressStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *IngressStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.LoadBalancer.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *IngressTLS) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *IngressTLS) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *IngressTLS) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.SecretName)
+ copy(dAtA[i:], m.SecretName)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName)))
+ i--
+ dAtA[i] = 0x12
+ if len(m.Hosts) > 0 {
+ for iNdEx := len(m.Hosts) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Hosts[iNdEx])
+ copy(dAtA[i:], m.Hosts[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hosts[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *NetworkPolicy) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *NetworkPolicy) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NetworkPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *NetworkPolicyEgressRule) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *NetworkPolicyEgressRule) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NetworkPolicyEgressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.To) > 0 {
+ for iNdEx := len(m.To) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.To[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Ports) > 0 {
+ for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *NetworkPolicyIngressRule) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *NetworkPolicyIngressRule) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NetworkPolicyIngressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.From) > 0 {
+ for iNdEx := len(m.From) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.From[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Ports) > 0 {
+ for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *NetworkPolicyList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *NetworkPolicyList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NetworkPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *NetworkPolicyPeer) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *NetworkPolicyPeer) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NetworkPolicyPeer) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.IPBlock != nil {
+ {
+ size, err := m.IPBlock.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.NamespaceSelector != nil {
+ {
+ size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.PodSelector != nil {
+ {
+ size, err := m.PodSelector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *NetworkPolicyPort) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *NetworkPolicyPort) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NetworkPolicyPort) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.EndPort != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.EndPort))
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.Port != nil {
+ {
+ size, err := m.Port.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Protocol != nil {
+ i -= len(*m.Protocol)
+ copy(dAtA[i:], *m.Protocol)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *NetworkPolicySpec) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *NetworkPolicySpec) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NetworkPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.PolicyTypes) > 0 {
+ for iNdEx := len(m.PolicyTypes) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.PolicyTypes[iNdEx])
+ copy(dAtA[i:], m.PolicyTypes[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyTypes[iNdEx])))
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if len(m.Egress) > 0 {
+ for iNdEx := len(m.Egress) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Egress[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.Ingress) > 0 {
+ for iNdEx := len(m.Ingress) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Ingress[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.PodSelector.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *ServiceBackendPort) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ServiceBackendPort) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ServiceBackendPort) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Number))
+ i--
+ dAtA[i] = 0x10
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *HTTPIngressPath) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Path)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Backend.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.PathType != nil {
+ l = len(*m.PathType)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *HTTPIngressRuleValue) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Paths) > 0 {
+ for _, e := range m.Paths {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *IPBlock) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.CIDR)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Except) > 0 {
+ for _, s := range m.Except {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *Ingress) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Status.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *IngressBackend) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Service != nil {
+ l = m.Service.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *IngressClass) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *IngressClassList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *IngressClassParametersReference) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.APIGroup != nil {
+ l = len(*m.APIGroup)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ l = len(m.Kind)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Scope != nil {
+ l = len(*m.Scope)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Namespace != nil {
+ l = len(*m.Namespace)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *IngressClassSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Controller)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Parameters != nil {
+ l = m.Parameters.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *IngressList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *IngressLoadBalancerIngress) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.IP)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = len(m.Hostname)
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Ports) > 0 {
+ for _, e := range m.Ports {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *IngressLoadBalancerStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Ingress) > 0 {
+ for _, e := range m.Ingress {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *IngressPortStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.Port))
+ l = len(m.Protocol)
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.Error != nil {
+ l = len(*m.Error)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *IngressRule) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Host)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.IngressRuleValue.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *IngressRuleValue) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.HTTP != nil {
+ l = m.HTTP.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *IngressServiceBackend) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Port.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *IngressSpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.DefaultBackend != nil {
+ l = m.DefaultBackend.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if len(m.TLS) > 0 {
+ for _, e := range m.TLS {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Rules) > 0 {
+ for _, e := range m.Rules {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if m.IngressClassName != nil {
+ l = len(*m.IngressClassName)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *IngressStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.LoadBalancer.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *IngressTLS) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Hosts) > 0 {
+ for _, s := range m.Hosts {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = len(m.SecretName)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *NetworkPolicy) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ l = m.Spec.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *NetworkPolicyEgressRule) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Ports) > 0 {
+ for _, e := range m.Ports {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.To) > 0 {
+ for _, e := range m.To {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *NetworkPolicyIngressRule) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Ports) > 0 {
+ for _, e := range m.Ports {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.From) > 0 {
+ for _, e := range m.From {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *NetworkPolicyList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *NetworkPolicyPeer) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.PodSelector != nil {
+ l = m.PodSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.NamespaceSelector != nil {
+ l = m.NamespaceSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.IPBlock != nil {
+ l = m.IPBlock.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
+func (m *NetworkPolicyPort) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Protocol != nil {
+ l = len(*m.Protocol)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.Port != nil {
+ l = m.Port.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ if m.EndPort != nil {
+ n += 1 + sovGenerated(uint64(*m.EndPort))
+ }
+ return n
+}
+
+func (m *NetworkPolicySpec) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.PodSelector.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Ingress) > 0 {
+ for _, e := range m.Ingress {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Egress) > 0 {
+ for _, e := range m.Egress {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.PolicyTypes) > 0 {
+ for _, s := range m.PolicyTypes {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *ServiceBackendPort) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ n += 1 + l + sovGenerated(uint64(l))
+ n += 1 + sovGenerated(uint64(m.Number))
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *HTTPIngressPath) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&HTTPIngressPath{`,
+ `Path:` + fmt.Sprintf("%v", this.Path) + `,`,
+ `Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`,
+ `PathType:` + valueToStringGenerated(this.PathType) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *HTTPIngressRuleValue) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForPaths := "[]HTTPIngressPath{"
+ for _, f := range this.Paths {
+ repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForPaths += "}"
+ s := strings.Join([]string{`&HTTPIngressRuleValue{`,
+ `Paths:` + repeatedStringForPaths + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *IPBlock) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&IPBlock{`,
+ `CIDR:` + fmt.Sprintf("%v", this.CIDR) + `,`,
+ `Except:` + fmt.Sprintf("%v", this.Except) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Ingress) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Ingress{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IngressSpec", "IngressSpec", 1), `&`, ``, 1) + `,`,
+ `Status:` + strings.Replace(strings.Replace(this.Status.String(), "IngressStatus", "IngressStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *IngressBackend) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&IngressBackend{`,
+ `Service:` + strings.Replace(this.Service.String(), "IngressServiceBackend", "IngressServiceBackend", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *IngressClass) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&IngressClass{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IngressClassSpec", "IngressClassSpec", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *IngressClassList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]IngressClass{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "IngressClass", "IngressClass", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&IngressClassList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *IngressClassParametersReference) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&IngressClassParametersReference{`,
+ `APIGroup:` + valueToStringGenerated(this.APIGroup) + `,`,
+ `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Scope:` + valueToStringGenerated(this.Scope) + `,`,
+ `Namespace:` + valueToStringGenerated(this.Namespace) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *IngressClassSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&IngressClassSpec{`,
+ `Controller:` + fmt.Sprintf("%v", this.Controller) + `,`,
+ `Parameters:` + strings.Replace(this.Parameters.String(), "IngressClassParametersReference", "IngressClassParametersReference", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *IngressList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]Ingress{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Ingress", "Ingress", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&IngressList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *IngressLoadBalancerIngress) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForPorts := "[]IngressPortStatus{"
+ for _, f := range this.Ports {
+ repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "IngressPortStatus", "IngressPortStatus", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForPorts += "}"
+ s := strings.Join([]string{`&IngressLoadBalancerIngress{`,
+ `IP:` + fmt.Sprintf("%v", this.IP) + `,`,
+ `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`,
+ `Ports:` + repeatedStringForPorts + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *IngressLoadBalancerStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForIngress := "[]IngressLoadBalancerIngress{"
+ for _, f := range this.Ingress {
+ repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "IngressLoadBalancerIngress", "IngressLoadBalancerIngress", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForIngress += "}"
+ s := strings.Join([]string{`&IngressLoadBalancerStatus{`,
+ `Ingress:` + repeatedStringForIngress + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *IngressPortStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&IngressPortStatus{`,
+ `Port:` + fmt.Sprintf("%v", this.Port) + `,`,
+ `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`,
+ `Error:` + valueToStringGenerated(this.Error) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *IngressRule) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&IngressRule{`,
+ `Host:` + fmt.Sprintf("%v", this.Host) + `,`,
+ `IngressRuleValue:` + strings.Replace(strings.Replace(this.IngressRuleValue.String(), "IngressRuleValue", "IngressRuleValue", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *IngressRuleValue) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&IngressRuleValue{`,
+ `HTTP:` + strings.Replace(this.HTTP.String(), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *IngressServiceBackend) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&IngressServiceBackend{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Port:` + strings.Replace(strings.Replace(this.Port.String(), "ServiceBackendPort", "ServiceBackendPort", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *IngressSpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForTLS := "[]IngressTLS{"
+ for _, f := range this.TLS {
+ repeatedStringForTLS += strings.Replace(strings.Replace(f.String(), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForTLS += "}"
+ repeatedStringForRules := "[]IngressRule{"
+ for _, f := range this.Rules {
+ repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "IngressRule", "IngressRule", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForRules += "}"
+ s := strings.Join([]string{`&IngressSpec{`,
+ `DefaultBackend:` + strings.Replace(this.DefaultBackend.String(), "IngressBackend", "IngressBackend", 1) + `,`,
+ `TLS:` + repeatedStringForTLS + `,`,
+ `Rules:` + repeatedStringForRules + `,`,
+ `IngressClassName:` + valueToStringGenerated(this.IngressClassName) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *IngressStatus) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&IngressStatus{`,
+ `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "IngressLoadBalancerStatus", "IngressLoadBalancerStatus", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *IngressTLS) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&IngressTLS{`,
+ `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`,
+ `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *NetworkPolicy) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&NetworkPolicy{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkPolicySpec", "NetworkPolicySpec", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *NetworkPolicyEgressRule) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForPorts := "[]NetworkPolicyPort{"
+ for _, f := range this.Ports {
+ repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForPorts += "}"
+ repeatedStringForTo := "[]NetworkPolicyPeer{"
+ for _, f := range this.To {
+ repeatedStringForTo += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForTo += "}"
+ s := strings.Join([]string{`&NetworkPolicyEgressRule{`,
+ `Ports:` + repeatedStringForPorts + `,`,
+ `To:` + repeatedStringForTo + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *NetworkPolicyIngressRule) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForPorts := "[]NetworkPolicyPort{"
+ for _, f := range this.Ports {
+ repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForPorts += "}"
+ repeatedStringForFrom := "[]NetworkPolicyPeer{"
+ for _, f := range this.From {
+ repeatedStringForFrom += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForFrom += "}"
+ s := strings.Join([]string{`&NetworkPolicyIngressRule{`,
+ `Ports:` + repeatedStringForPorts + `,`,
+ `From:` + repeatedStringForFrom + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *NetworkPolicyList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]NetworkPolicy{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&NetworkPolicyList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *NetworkPolicyPeer) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&NetworkPolicyPeer{`,
+ `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
+ `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`,
+ `IPBlock:` + strings.Replace(this.IPBlock.String(), "IPBlock", "IPBlock", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *NetworkPolicyPort) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&NetworkPolicyPort{`,
+ `Protocol:` + valueToStringGenerated(this.Protocol) + `,`,
+ `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1) + `,`,
+ `EndPort:` + valueToStringGenerated(this.EndPort) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *NetworkPolicySpec) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForIngress := "[]NetworkPolicyIngressRule{"
+ for _, f := range this.Ingress {
+ repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForIngress += "}"
+ repeatedStringForEgress := "[]NetworkPolicyEgressRule{"
+ for _, f := range this.Egress {
+ repeatedStringForEgress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyEgressRule", "NetworkPolicyEgressRule", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForEgress += "}"
+ s := strings.Join([]string{`&NetworkPolicySpec{`,
+ `PodSelector:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1), `&`, ``, 1) + `,`,
+ `Ingress:` + repeatedStringForIngress + `,`,
+ `Egress:` + repeatedStringForEgress + `,`,
+ `PolicyTypes:` + fmt.Sprintf("%v", this.PolicyTypes) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ServiceBackendPort) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ServiceBackendPort{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Number:` + fmt.Sprintf("%v", this.Number) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringGenerated(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PathType", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := PathType(dAtA[iNdEx:postIndex])
+ m.PathType = &s
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Paths = append(m.Paths, HTTPIngressPath{})
+ if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *IPBlock) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IPBlock: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IPBlock: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CIDR", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CIDR = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Except", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Except = append(m.Except, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Ingress) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Ingress: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *IngressBackend) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Service == nil {
+ m.Service = &IngressServiceBackend{}
+ }
+ if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *IngressClass) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IngressClass: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IngressClass: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *IngressClassList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IngressClassList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IngressClassList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, IngressClass{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+// Unmarshal decodes protobuf wire-format bytes from dAtA into m.
+// NOTE(review): gogo/protobuf-generated decoder (dAtA/iNdEx naming,
+// skipGenerated helper) — regenerate from the .proto rather than hand-edit.
+func (m *IngressClassParametersReference) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IngressClassParametersReference: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IngressClassParametersReference: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.APIGroup = &s
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Kind = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Scope = &s
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Namespace = &s
+ iNdEx = postIndex
+ default:
+ // Unknown field number: skip its payload so unknown/future
+ // fields are tolerated (forward compatibility).
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+// Unmarshal decodes protobuf wire-format bytes from dAtA into m.
+// NOTE(review): gogo/protobuf-generated decoder — regenerate from the
+// .proto rather than hand-edit.
+func (m *IngressClassSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IngressClassSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IngressClassSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Controller = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ // Lazily allocate the optional sub-message before decoding into it.
+ if m.Parameters == nil {
+ m.Parameters = &IngressClassParametersReference{}
+ }
+ if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ // Unknown field number: skip its payload so unknown/future
+ // fields are tolerated (forward compatibility).
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+// Unmarshal decodes protobuf wire-format bytes from dAtA into m.
+// NOTE(review): gogo/protobuf-generated decoder — regenerate from the
+// .proto rather than hand-edit.
+func (m *IngressList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IngressList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ // Repeated field: append a zero element, then decode in place.
+ m.Items = append(m.Items, Ingress{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ // Unknown field number: skip its payload so unknown/future
+ // fields are tolerated (forward compatibility).
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+// Unmarshal decodes protobuf wire-format bytes from dAtA into m.
+// NOTE(review): gogo/protobuf-generated decoder — regenerate from the
+// .proto rather than hand-edit. Field tags jump from 2 to 4, mirroring
+// the field numbers declared in the source .proto.
+func (m *IngressLoadBalancerIngress) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IngressLoadBalancerIngress: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IngressLoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.IP = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Hostname = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ // Repeated field: append a zero element, then decode in place.
+ m.Ports = append(m.Ports, IngressPortStatus{})
+ if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ // Unknown field number: skip its payload so unknown/future
+ // fields are tolerated (forward compatibility).
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+// Unmarshal decodes protobuf wire-format bytes from dAtA into m.
+// NOTE(review): gogo/protobuf-generated decoder — regenerate from the
+// .proto rather than hand-edit.
+func (m *IngressLoadBalancerStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IngressLoadBalancerStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IngressLoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ // Repeated field: append a zero element, then decode in place.
+ m.Ingress = append(m.Ingress, IngressLoadBalancerIngress{})
+ if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ // Unknown field number: skip its payload so unknown/future
+ // fields are tolerated (forward compatibility).
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+// Unmarshal decodes protobuf wire-format bytes from dAtA into m.
+// NOTE(review): gogo/protobuf-generated decoder — regenerate from the
+// .proto rather than hand-edit.
+func (m *IngressPortStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IngressPortStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IngressPortStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ // Port is a varint (wire type 0), accumulated directly into the field.
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
+ }
+ m.Port = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Port |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Protocol = github_com_cilium_cilium_pkg_k8s_slim_k8s_api_core_v1.Protocol(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.Error = &s
+ iNdEx = postIndex
+ default:
+ // Unknown field number: skip its payload so unknown/future
+ // fields are tolerated (forward compatibility).
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+// Unmarshal decodes protobuf wire-format bytes from dAtA into m.
+// NOTE(review): gogo/protobuf-generated decoder — regenerate from the
+// .proto rather than hand-edit.
+func (m *IngressRule) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IngressRule: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Host = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ // Unknown field number: skip its payload so unknown/future
+ // fields are tolerated (forward compatibility).
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+// Unmarshal decodes protobuf wire-format bytes from dAtA into m.
+// NOTE(review): gogo/protobuf-generated decoder — regenerate from the
+// .proto rather than hand-edit.
+func (m *IngressRuleValue) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ // Lazily allocate the optional sub-message before decoding into it.
+ if m.HTTP == nil {
+ m.HTTP = &HTTPIngressRuleValue{}
+ }
+ if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ // Unknown field number: skip its payload so unknown/future
+ // fields are tolerated (forward compatibility).
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+// Unmarshal decodes protobuf wire-format bytes from dAtA into m.
+// NOTE(review): gogo/protobuf-generated decoder — regenerate from the
+// .proto rather than hand-edit.
+func (m *IngressServiceBackend) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IngressServiceBackend: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IngressServiceBackend: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ // Unknown field number: skip its payload so unknown/future
+ // fields are tolerated (forward compatibility).
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+// Unmarshal decodes protobuf wire-format bytes from dAtA into m.
+// NOTE(review): gogo/protobuf-generated decoder — regenerate from the
+// .proto rather than hand-edit.
+func (m *IngressSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DefaultBackend", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ // Lazily allocate the optional sub-message before decoding into it.
+ if m.DefaultBackend == nil {
+ m.DefaultBackend = &IngressBackend{}
+ }
+ if err := m.DefaultBackend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ // Repeated field: append a zero element, then decode in place.
+ m.TLS = append(m.TLS, IngressTLS{})
+ if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ // Repeated field: append a zero element, then decode in place.
+ m.Rules = append(m.Rules, IngressRule{})
+ if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := string(dAtA[iNdEx:postIndex])
+ m.IngressClassName = &s
+ iNdEx = postIndex
+ default:
+ // Unknown field number: skip its payload so unknown/future
+ // fields are tolerated (forward compatibility).
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+// Unmarshal decodes protobuf wire-format bytes from dAtA into m.
+// NOTE(review): gogo/protobuf-generated decoder — regenerate from the
+// .proto rather than hand-edit.
+func (m *IngressStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ // Unknown field number: skip its payload so unknown/future
+ // fields are tolerated (forward compatibility).
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+// Unmarshal decodes protobuf wire-format bytes from dAtA into m.
+// NOTE(review): gogo/protobuf-generated decoder — regenerate from the
+// .proto rather than hand-edit.
+func (m *IngressTLS) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ // Repeated string field: each occurrence appends one element.
+ m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SecretName = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ // Unknown field number: skip its payload so unknown/future
+ // fields are tolerated (forward compatibility).
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NetworkPolicy) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NetworkPolicy: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NetworkPolicy: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NetworkPolicyEgressRule: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NetworkPolicyEgressRule: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Ports = append(m.Ports, NetworkPolicyPort{})
+ if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field To", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.To = append(m.To, NetworkPolicyPeer{})
+ if err := m.To[len(m.To)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NetworkPolicyIngressRule: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NetworkPolicyIngressRule: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Ports = append(m.Ports, NetworkPolicyPort{})
+ if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.From = append(m.From, NetworkPolicyPeer{})
+ if err := m.From[len(m.From)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NetworkPolicyList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, NetworkPolicy{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NetworkPolicyPeer: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NetworkPolicyPeer: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.PodSelector == nil {
+ m.PodSelector = &v1.LabelSelector{}
+ }
+ if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NamespaceSelector == nil {
+ m.NamespaceSelector = &v1.LabelSelector{}
+ }
+ if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IPBlock", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.IPBlock == nil {
+ m.IPBlock = &IPBlock{}
+ }
+ if err := m.IPBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NetworkPolicyPort: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NetworkPolicyPort: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ s := github_com_cilium_cilium_pkg_k8s_slim_k8s_api_core_v1.Protocol(dAtA[iNdEx:postIndex])
+ m.Protocol = &s
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Port == nil {
+ m.Port = &intstr.IntOrString{}
+ }
+ if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EndPort", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.EndPort = &v
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: NetworkPolicySpec: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: NetworkPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Ingress = append(m.Ingress, NetworkPolicyIngressRule{})
+ if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Egress", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Egress = append(m.Egress, NetworkPolicyEgressRule{})
+ if err := m.Egress[len(m.Egress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PolicyTypes", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PolicyTypes = append(m.PolicyTypes, PolicyType(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ServiceBackendPort) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ServiceBackendPort: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ServiceBackendPort: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Number", wireType)
+ }
+ m.Number = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Number |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenerated
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/generated.proto b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/generated.proto
new file mode 100644
index 000000000..0ca15db77
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/generated.proto
@@ -0,0 +1,526 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = "proto2";
+
+package github.com.cilium.cilium.pkg.k8s.slim.k8s.api.networking.v1;
+
+import "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.proto";
+import "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/generated.proto";
+import "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1";
+
+// HTTPIngressPath associates a path with a backend. Incoming urls matching the
+// path are forwarded to the backend.
+message HTTPIngressPath {
+ // path is matched against the path of an incoming request. Currently it can
+ // contain characters disallowed from the conventional "path" part of a URL
+ // as defined by RFC 3986. Paths must begin with a '/' and must be present
+ // when using PathType with value "Exact" or "Prefix".
+ // +optional
+ optional string path = 1;
+
+ // pathType determines the interpretation of the path matching. PathType can
+ // be one of the following values:
+ // * Exact: Matches the URL path exactly.
+ // * Prefix: Matches based on a URL path prefix split by '/'. Matching is
+ // done on a path element by element basis. A path element refers is the
+ // list of labels in the path split by the '/' separator. A request is a
+ // match for path p if every p is an element-wise prefix of p of the
+ // request path. Note that if the last element of the path is a substring
+ // of the last element in request path, it is not a match (e.g. /foo/bar
+ // matches /foo/bar/baz, but does not match /foo/barbaz).
+ // * ImplementationSpecific: Interpretation of the Path matching is up to
+ // the IngressClass. Implementations can treat this as a separate PathType
+ // or treat it identically to Prefix or Exact path types.
+ // Implementations are required to support all path types.
+ optional string pathType = 3;
+
+ // backend defines the referenced service endpoint to which the traffic
+ // will be forwarded to.
+ optional IngressBackend backend = 2;
+}
+
+// HTTPIngressRuleValue is a list of http selectors pointing to backends.
+// In the example: http://<host>/<path>?<searchpart> -> backend where
+// where parts of the url correspond to RFC 3986, this resource will be used
+// to match against everything after the last '/' and before the first '?'
+// or '#'.
+message HTTPIngressRuleValue {
+ // paths is a collection of paths that map requests to backends.
+ // +listType=atomic
+ repeated HTTPIngressPath paths = 1;
+}
+
+// IPBlock describes a particular CIDR (Ex. "192.168.1.0/24","2001:db8::/64") that is allowed
+// to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs
+// that should not be included within this rule.
+message IPBlock {
+ // cidr is a string representing the IPBlock
+ // Valid examples are "192.168.1.0/24" or "2001:db8::/64"
+ optional string cidr = 1;
+
+ // except is a slice of CIDRs that should not be included within an IPBlock
+ // Valid examples are "192.168.1.0/24" or "2001:db8::/64"
+ // Except values will be rejected if they are outside the cidr range
+ // +optional
+ repeated string except = 2;
+}
+
+// Ingress is a collection of rules that allow inbound connections to reach the
+// endpoints defined by a backend. An Ingress can be configured to give services
+// externally-reachable urls, load balance traffic, terminate SSL, offer name
+// based virtual hosting etc.
+message Ingress {
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // spec is the desired state of the Ingress.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ optional IngressSpec spec = 2;
+
+ // status is the current state of the Ingress.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ optional IngressStatus status = 3;
+}
+
+// IngressBackend describes all endpoints for a given service and port.
+message IngressBackend {
+ // service references a service as a backend.
+ // This is a mutually exclusive setting with "Resource".
+ // +optional
+ optional IngressServiceBackend service = 4;
+}
+
+// IngressClass represents the class of the Ingress, referenced by the Ingress
+// Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be
+// used to indicate that an IngressClass should be considered default. When a
+// single IngressClass resource has this annotation set to true, new Ingress
+// resources without a class specified will be assigned this default class.
+message IngressClass {
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // spec is the desired state of the IngressClass.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ optional IngressClassSpec spec = 2;
+}
+
+// IngressClassList is a collection of IngressClasses.
+message IngressClassList {
+ // Standard list metadata.
+ // +optional
+ optional github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ListMeta metadata = 1;
+
+ // items is the list of IngressClasses.
+ repeated IngressClass items = 2;
+}
+
+// IngressClassParametersReference identifies an API object. This can be used
+// to specify a cluster or namespace-scoped resource.
+message IngressClassParametersReference {
+ // apiGroup is the group for the resource being referenced. If APIGroup is
+ // not specified, the specified Kind must be in the core API group. For any
+ // other third-party types, APIGroup is required.
+ // +optional
+ optional string aPIGroup = 1;
+
+ // kind is the type of resource being referenced.
+ optional string kind = 2;
+
+ // name is the name of resource being referenced.
+ optional string name = 3;
+
+ // scope represents if this refers to a cluster or namespace scoped resource.
+ // This may be set to "Cluster" (default) or "Namespace".
+ // +optional
+ optional string scope = 4;
+
+ // namespace is the namespace of the resource being referenced. This field is
+ // required when scope is set to "Namespace" and must be unset when scope is set to
+ // "Cluster".
+ // +optional
+ optional string namespace = 5;
+}
+
+// IngressClassSpec provides information about the class of an Ingress.
+message IngressClassSpec {
+ // controller refers to the name of the controller that should handle this
+ // class. This allows for different "flavors" that are controlled by the
+ // same controller. For example, you may have different parameters for the
+ // same implementing controller. This should be specified as a
+ // domain-prefixed path no more than 250 characters in length, e.g.
+ // "acme.io/ingress-controller". This field is immutable.
+ optional string controller = 1;
+
+ // parameters is a link to a custom resource containing additional
+ // configuration for the controller. This is optional if the controller does
+ // not require extra parameters.
+ // +optional
+ optional IngressClassParametersReference parameters = 2;
+}
+
+// IngressList is a collection of Ingress.
+message IngressList {
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ListMeta metadata = 1;
+
+ // items is the list of Ingress.
+ repeated Ingress items = 2;
+}
+
+// IngressLoadBalancerIngress represents the status of a load-balancer ingress point.
+message IngressLoadBalancerIngress {
+ // ip is set for load-balancer ingress points that are IP based.
+ // +optional
+ optional string ip = 1;
+
+ // hostname is set for load-balancer ingress points that are DNS based.
+ // +optional
+ optional string hostname = 2;
+
+ // ports provides information about the ports exposed by this LoadBalancer.
+ // +listType=atomic
+ // +optional
+ repeated IngressPortStatus ports = 4;
+}
+
+// IngressLoadBalancerStatus represents the status of a load-balancer.
+message IngressLoadBalancerStatus {
+ // ingress is a list containing ingress points for the load-balancer.
+ // +optional
+ repeated IngressLoadBalancerIngress ingress = 1;
+}
+
+// IngressPortStatus represents the error condition of a service port
+message IngressPortStatus {
+ // port is the port number of the ingress port.
+ optional int32 port = 1;
+
+ // protocol is the protocol of the ingress port.
+ // The supported values are: "TCP", "UDP", "SCTP"
+ optional string protocol = 2;
+
+ // error is to record the problem with the service port
+ // The format of the error shall comply with the following rules:
+ // - built-in error values shall be specified in this file and those shall use
+ // CamelCase names
+ // - cloud provider specific error values must have names that comply with the
+ // format foo.example.com/CamelCase.
+ // ---
+ // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+ // +optional
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$`
+ // +kubebuilder:validation:MaxLength=316
+ optional string error = 3;
+}
+
+// IngressRule represents the rules mapping the paths under a specified host to
+// the related backend services. Incoming requests are first evaluated for a host
+// match, then routed to the backend associated with the matching IngressRuleValue.
+message IngressRule {
+ // host is the fully qualified domain name of a network host, as defined by RFC 3986.
+ // Note the following deviations from the "host" part of the
+ // URI as defined in RFC 3986:
+ // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to
+ // the IP in the Spec of the parent Ingress.
+ // 2. The `:` delimiter is not respected because ports are not allowed.
+ // Currently the port of an Ingress is implicitly :80 for http and
+ // :443 for https.
+ // Both these may change in the future.
+ // Incoming requests are matched against the host before the
+ // IngressRuleValue. If the host is unspecified, the Ingress routes all
+ // traffic based on the specified IngressRuleValue.
+ //
+ // host can be "precise" which is a domain name without the terminating dot of
+ // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name
+ // prefixed with a single wildcard label (e.g. "*.foo.com").
+ // The wildcard character '*' must appear by itself as the first DNS label and
+ // matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*").
+ // Requests will be matched against the Host field in the following way:
+ // 1. If host is precise, the request matches this rule if the http host header is equal to Host.
+ // 2. If host is a wildcard, then the request matches this rule if the http host header
+ // is to equal to the suffix (removing the first label) of the wildcard rule.
+ // +optional
+ optional string host = 1;
+
+ // IngressRuleValue represents a rule to route requests for this IngressRule.
+ // If unspecified, the rule defaults to a http catch-all. Whether that sends
+ // just traffic matching the host to the default backend or all traffic to the
+ // default backend, is left to the controller fulfilling the Ingress. Http is
+ // currently the only supported IngressRuleValue.
+ // +optional
+ optional IngressRuleValue ingressRuleValue = 2;
+}
+
+// IngressRuleValue represents a rule to apply against incoming requests. If the
+// rule is satisfied, the request is routed to the specified backend. Currently
+// mixing different types of rules in a single Ingress is disallowed, so exactly
+// one of the following must be set.
+message IngressRuleValue {
+ // +optional
+ optional HTTPIngressRuleValue http = 1;
+}
+
+// IngressServiceBackend references a Kubernetes Service as a Backend.
+message IngressServiceBackend {
+ // name is the referenced service. The service must exist in
+ // the same namespace as the Ingress object.
+ optional string name = 1;
+
+ // port of the referenced service. A port name or port number
+ // is required for a IngressServiceBackend.
+ optional ServiceBackendPort port = 2;
+}
+
+// IngressSpec describes the Ingress the user wishes to exist.
+message IngressSpec {
+ // ingressClassName is the name of an IngressClass cluster resource. Ingress
+ // controller implementations use this field to know whether they should be
+ // serving this Ingress resource, by a transitive connection
+ // (controller -> IngressClass -> Ingress resource). Although the
+ // `kubernetes.io/ingress.class` annotation (simple constant name) was never
+ // formally defined, it was widely supported by Ingress controllers to create
+ // a direct binding between Ingress controller and Ingress resources. Newly
+ // created Ingress resources should prefer using the field. However, even
+ // though the annotation is officially deprecated, for backwards compatibility
+ // reasons, ingress controllers should still honor that annotation if present.
+ // +optional
+ optional string ingressClassName = 4;
+
+ // defaultBackend is the backend that should handle requests that don't
+ // match any rule. If Rules are not specified, DefaultBackend must be specified.
+ // If DefaultBackend is not set, the handling of requests that do not match any
+ // of the rules will be up to the Ingress controller.
+ // +optional
+ optional IngressBackend defaultBackend = 1;
+
+ // tls represents the TLS configuration. Currently the Ingress only supports a
+ // single TLS port, 443. If multiple members of this list specify different hosts,
+ // they will be multiplexed on the same port according to the hostname specified
+ // through the SNI TLS extension, if the ingress controller fulfilling the
+ // ingress supports SNI.
+ // +listType=atomic
+ // +optional
+ repeated IngressTLS tls = 2;
+
+ // rules is a list of host rules used to configure the Ingress. If unspecified,
+ // or no rule matches, all traffic is sent to the default backend.
+ // +listType=atomic
+ // +optional
+ repeated IngressRule rules = 3;
+}
+
+// IngressStatus describe the current state of the Ingress.
+message IngressStatus {
+ // loadBalancer contains the current status of the load-balancer.
+ // +optional
+ optional IngressLoadBalancerStatus loadBalancer = 1;
+}
+
+// IngressTLS describes the transport layer security associated with an ingress.
+message IngressTLS {
+ // hosts is a list of hosts included in the TLS certificate. The values in
+ // this list must match the name/s used in the tlsSecret. Defaults to the
+ // wildcard host setting for the loadbalancer controller fulfilling this
+ // Ingress, if left unspecified.
+ // +listType=atomic
+ // +optional
+ repeated string hosts = 1;
+
+ // secretName is the name of the secret used to terminate TLS traffic on
+ // port 443. Field is left optional to allow TLS routing based on SNI
+ // hostname alone. If the SNI host in a listener conflicts with the "Host"
+ // header field used by an IngressRule, the SNI host is used for termination
+ // and value of the "Host" header is used for routing.
+ // +optional
+ optional string secretName = 2;
+}
+
+// NetworkPolicy describes what network traffic is allowed for a set of Pods
+message NetworkPolicy {
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // spec represents the specification of the desired behavior for this NetworkPolicy.
+ // +optional
+ optional NetworkPolicySpec spec = 2;
+}
+
+// NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods
+// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to.
+// This type is beta-level in 1.8
+message NetworkPolicyEgressRule {
+ // ports is a list of destination ports for outgoing traffic.
+ // Each item in this list is combined using a logical OR. If this field is
+ // empty or missing, this rule matches all ports (traffic not restricted by port).
+ // If this field is present and contains at least one item, then this rule allows
+ // traffic only if the traffic matches at least one port in the list.
+ // +optional
+ repeated NetworkPolicyPort ports = 1;
+
+ // to is a list of destinations for outgoing traffic of pods selected for this rule.
+ // Items in this list are combined using a logical OR operation. If this field is
+ // empty or missing, this rule matches all destinations (traffic not restricted by
+ // destination). If this field is present and contains at least one item, this rule
+ // allows traffic only if the traffic matches at least one item in the to list.
+ // +optional
+ repeated NetworkPolicyPeer to = 2;
+}
+
+// NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods
+// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from.
+message NetworkPolicyIngressRule {
+ // ports is a list of ports which should be made accessible on the pods selected for
+ // this rule. Each item in this list is combined using a logical OR. If this field is
+ // empty or missing, this rule matches all ports (traffic not restricted by port).
+ // If this field is present and contains at least one item, then this rule allows
+ // traffic only if the traffic matches at least one port in the list.
+ // +optional
+ repeated NetworkPolicyPort ports = 1;
+
+ // from is a list of sources which should be able to access the pods selected for this rule.
+ // Items in this list are combined using a logical OR operation. If this field is
+ // empty or missing, this rule matches all sources (traffic not restricted by
+ // source). If this field is present and contains at least one item, this rule
+ // allows traffic only if the traffic matches at least one item in the from list.
+ // +optional
+ repeated NetworkPolicyPeer from = 2;
+}
+
+// NetworkPolicyList is a list of NetworkPolicy objects.
+message NetworkPolicyList {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ListMeta metadata = 1;
+
+ // items is a list of schema objects.
+ repeated NetworkPolicy items = 2;
+}
+
+// NetworkPolicyPeer describes a peer to allow traffic to/from. Only certain combinations of
+// fields are allowed
+message NetworkPolicyPeer {
+ // podSelector is a label selector which selects pods. This field follows standard label
+ // selector semantics; if present but empty, it selects all pods.
+ //
+ // If namespaceSelector is also set, then the NetworkPolicyPeer as a whole selects
+ // the pods matching podSelector in the Namespaces selected by NamespaceSelector.
+ // Otherwise it selects the pods matching podSelector in the policy's own namespace.
+ // +optional
+ optional github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.LabelSelector podSelector = 1;
+
+ // namespaceSelector selects namespaces using cluster-scoped labels. This field follows
+ // standard label selector semantics; if present but empty, it selects all namespaces.
+ //
+ // If podSelector is also set, then the NetworkPolicyPeer as a whole selects
+ // the pods matching podSelector in the namespaces selected by namespaceSelector.
+ // Otherwise it selects all pods in the namespaces selected by namespaceSelector.
+ // +optional
+ optional github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.LabelSelector namespaceSelector = 2;
+
+ // ipBlock defines policy on a particular IPBlock. If this field is set then
+ // neither of the other fields can be.
+ // +optional
+ optional IPBlock ipBlock = 3;
+}
+
+// NetworkPolicyPort describes a port to allow traffic on
+message NetworkPolicyPort {
+ // protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match.
+ // If not specified, this field defaults to TCP.
+ // +optional
+ optional string protocol = 1;
+
+ // port represents the port on the given protocol. This can either be a numerical or named
+ // port on a pod. If this field is not provided, this matches all port names and
+ // numbers.
+ // If present, only traffic on the specified protocol AND port will be matched.
+ // +optional
+ optional github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.util.intstr.IntOrString port = 2;
+
+ // endPort indicates that the range of ports from port to endPort if set, inclusive,
+ // should be allowed by the policy. This field cannot be defined if the port field
+ // is not defined or if the port field is defined as a named (string) port.
+ // The endPort must be equal or greater than port.
+ // +optional
+ optional int32 endPort = 3;
+}
+
+// NetworkPolicySpec provides the specification of a NetworkPolicy
+message NetworkPolicySpec {
+ // podSelector selects the pods to which this NetworkPolicy object applies.
+ // The array of ingress rules is applied to any pods selected by this field.
+ // Multiple network policies can select the same set of pods. In this case,
+ // the ingress rules for each are combined additively.
+ // This field is NOT optional and follows standard label selector semantics.
+ // An empty podSelector matches all pods in this namespace.
+ optional github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.LabelSelector podSelector = 1;
+
+ // ingress is a list of ingress rules to be applied to the selected pods.
+ // Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod
+ // (and cluster policy otherwise allows the traffic), OR if the traffic source is
+ // the pod's local node, OR if the traffic matches at least one ingress rule
+ // across all of the NetworkPolicy objects whose podSelector matches the pod. If
+ // this field is empty then this NetworkPolicy does not allow any traffic (and serves
+ // solely to ensure that the pods it selects are isolated by default)
+ // +optional
+ repeated NetworkPolicyIngressRule ingress = 2;
+
+ // egress is a list of egress rules to be applied to the selected pods. Outgoing traffic
+ // is allowed if there are no NetworkPolicies selecting the pod (and cluster policy
+ // otherwise allows the traffic), OR if the traffic matches at least one egress rule
+ // across all of the NetworkPolicy objects whose podSelector matches the pod. If
+ // this field is empty then this NetworkPolicy limits all outgoing traffic (and serves
+ // solely to ensure that the pods it selects are isolated by default).
+ // This field is beta-level in 1.8
+ // +optional
+ repeated NetworkPolicyEgressRule egress = 3;
+
+ // policyTypes is a list of rule types that the NetworkPolicy relates to.
+ // Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"].
+ // If this field is not specified, it will default based on the existence of ingress or egress rules;
+ // policies that contain an egress section are assumed to affect egress, and all policies
+ // (whether or not they contain an ingress section) are assumed to affect ingress.
+ // If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ].
+ // Likewise, if you want to write a policy that specifies that no egress is allowed,
+ // you must specify a policyTypes value that include "Egress" (since such a policy would not include
+ // an egress section and would otherwise default to just [ "Ingress" ]).
+ // This field is beta-level in 1.8
+ // +optional
+ repeated string policyTypes = 4;
+}
+
+// ServiceBackendPort is the service port being referenced.
+message ServiceBackendPort {
+ // name is the name of the port on the Service.
+ // This is a mutually exclusive setting with "Number".
+ // +optional
+ optional string name = 1;
+
+ // number is the numerical port number (e.g. 80) on the Service.
+ // This is a mutually exclusive setting with "Name".
+ // +optional
+ optional int32 number = 2;
+}
+
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/register.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/register.go
new file mode 100644
index 000000000..eca48bc0a
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/register.go
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Copyright 2017 The Kubernetes Authors.
+
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name use in this package
+const GroupName = "networking.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+ // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ localSchemeBuilder = &SchemeBuilder
+ AddToScheme = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &Ingress{},
+ &IngressList{},
+ &IngressClass{},
+ &IngressClassList{},
+ &NetworkPolicy{},
+ &NetworkPolicyList{},
+ )
+
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/types.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/types.go
new file mode 100644
index 000000000..47b45e2b0
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/types.go
@@ -0,0 +1,603 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Copyright 2017 The Kubernetes Authors.
+
+package v1
+
+import (
+ slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
+ slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
+ "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// NetworkPolicy describes what network traffic is allowed for a set of Pods
+type NetworkPolicy struct {
+ slim_metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ slim_metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // spec represents the specification of the desired behavior for this NetworkPolicy.
+ // +optional
+ Spec NetworkPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // Status is tombstoned to show why 3 is a reserved protobuf tag.
+ // This commented field should remain, so in the future if we decide to reimplement
+ // NetworkPolicyStatus a different protobuf name and tag SHOULD be used!
+ // Status NetworkPolicyStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// PolicyType string describes the NetworkPolicy type
+// This type is beta-level in 1.8
+// +enum
+type PolicyType string
+
+const (
+ // PolicyTypeIngress is a NetworkPolicy that affects ingress traffic on selected pods
+ PolicyTypeIngress PolicyType = "Ingress"
+ // PolicyTypeEgress is a NetworkPolicy that affects egress traffic on selected pods
+ PolicyTypeEgress PolicyType = "Egress"
+)
+
+// NetworkPolicySpec provides the specification of a NetworkPolicy
+type NetworkPolicySpec struct {
+ // podSelector selects the pods to which this NetworkPolicy object applies.
+ // The array of ingress rules is applied to any pods selected by this field.
+ // Multiple network policies can select the same set of pods. In this case,
+ // the ingress rules for each are combined additively.
+ // This field is NOT optional and follows standard label selector semantics.
+ // An empty podSelector matches all pods in this namespace.
+ PodSelector slim_metav1.LabelSelector `json:"podSelector" protobuf:"bytes,1,opt,name=podSelector"`
+
+ // ingress is a list of ingress rules to be applied to the selected pods.
+ // Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod
+ // (and cluster policy otherwise allows the traffic), OR if the traffic source is
+ // the pod's local node, OR if the traffic matches at least one ingress rule
+ // across all of the NetworkPolicy objects whose podSelector matches the pod. If
+ // this field is empty then this NetworkPolicy does not allow any traffic (and serves
+ // solely to ensure that the pods it selects are isolated by default)
+ // +optional
+ Ingress []NetworkPolicyIngressRule `json:"ingress,omitempty" protobuf:"bytes,2,rep,name=ingress"`
+
+ // egress is a list of egress rules to be applied to the selected pods. Outgoing traffic
+ // is allowed if there are no NetworkPolicies selecting the pod (and cluster policy
+ // otherwise allows the traffic), OR if the traffic matches at least one egress rule
+ // across all of the NetworkPolicy objects whose podSelector matches the pod. If
+ // this field is empty then this NetworkPolicy limits all outgoing traffic (and serves
+ // solely to ensure that the pods it selects are isolated by default).
+ // This field is beta-level in 1.8
+ // +optional
+ Egress []NetworkPolicyEgressRule `json:"egress,omitempty" protobuf:"bytes,3,rep,name=egress"`
+
+ // policyTypes is a list of rule types that the NetworkPolicy relates to.
+ // Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"].
+ // If this field is not specified, it will default based on the existence of ingress or egress rules;
+ // policies that contain an egress section are assumed to affect egress, and all policies
+ // (whether or not they contain an ingress section) are assumed to affect ingress.
+ // If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ].
+ // Likewise, if you want to write a policy that specifies that no egress is allowed,
+ // you must specify a policyTypes value that include "Egress" (since such a policy would not include
+ // an egress section and would otherwise default to just [ "Ingress" ]).
+ // This field is beta-level in 1.8
+ // +optional
+ PolicyTypes []PolicyType `json:"policyTypes,omitempty" protobuf:"bytes,4,rep,name=policyTypes,casttype=PolicyType"`
+}
+
+// NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods
+// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from.
+type NetworkPolicyIngressRule struct {
+ // ports is a list of ports which should be made accessible on the pods selected for
+ // this rule. Each item in this list is combined using a logical OR. If this field is
+ // empty or missing, this rule matches all ports (traffic not restricted by port).
+ // If this field is present and contains at least one item, then this rule allows
+ // traffic only if the traffic matches at least one port in the list.
+ // +optional
+ Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"`
+
+ // from is a list of sources which should be able to access the pods selected for this rule.
+ // Items in this list are combined using a logical OR operation. If this field is
+ // empty or missing, this rule matches all sources (traffic not restricted by
+ // source). If this field is present and contains at least one item, this rule
+ // allows traffic only if the traffic matches at least one item in the from list.
+ // +optional
+ From []NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,rep,name=from"`
+}
+
+// NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods
+// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to.
+// This type is beta-level in 1.8
+type NetworkPolicyEgressRule struct {
+ // ports is a list of destination ports for outgoing traffic.
+ // Each item in this list is combined using a logical OR. If this field is
+ // empty or missing, this rule matches all ports (traffic not restricted by port).
+ // If this field is present and contains at least one item, then this rule allows
+ // traffic only if the traffic matches at least one port in the list.
+ // +optional
+ Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"`
+
+ // to is a list of destinations for outgoing traffic of pods selected for this rule.
+ // Items in this list are combined using a logical OR operation. If this field is
+ // empty or missing, this rule matches all destinations (traffic not restricted by
+ // destination). If this field is present and contains at least one item, this rule
+ // allows traffic only if the traffic matches at least one item in the to list.
+ // +optional
+ To []NetworkPolicyPeer `json:"to,omitempty" protobuf:"bytes,2,rep,name=to"`
+}
+
+// NetworkPolicyPort describes a port to allow traffic on
+type NetworkPolicyPort struct {
+ // protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match.
+ // If not specified, this field defaults to TCP.
+ // +optional
+ Protocol *slim_corev1.Protocol `json:"protocol,omitempty" protobuf:"bytes,1,opt,name=protocol,casttype=k8s.io/api/core/v1.Protocol"`
+
+ // port represents the port on the given protocol. This can either be a numerical or named
+ // port on a pod. If this field is not provided, this matches all port names and
+ // numbers.
+ // If present, only traffic on the specified protocol AND port will be matched.
+ // +optional
+ Port *intstr.IntOrString `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"`
+
+ // endPort indicates that the range of ports from port to endPort if set, inclusive,
+ // should be allowed by the policy. This field cannot be defined if the port field
+ // is not defined or if the port field is defined as a named (string) port.
+ // The endPort must be equal or greater than port.
+ // +optional
+ EndPort *int32 `json:"endPort,omitempty" protobuf:"bytes,3,opt,name=endPort"`
+}
+
+// IPBlock describes a particular CIDR (Ex. "192.168.1.0/24","2001:db8::/64") that is allowed
+// to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs
+// that should not be included within this rule.
+type IPBlock struct {
+ // cidr is a string representing the IPBlock
+ // Valid examples are "192.168.1.0/24" or "2001:db8::/64"
+ CIDR string `json:"cidr" protobuf:"bytes,1,name=cidr"`
+
+ // except is a slice of CIDRs that should not be included within an IPBlock
+ // Valid examples are "192.168.1.0/24" or "2001:db8::/64"
+ // Except values will be rejected if they are outside the cidr range
+ // +optional
+ Except []string `json:"except,omitempty" protobuf:"bytes,2,rep,name=except"`
+}
+
+// NetworkPolicyPeer describes a peer to allow traffic to/from. Only certain combinations of
+// fields are allowed
+type NetworkPolicyPeer struct {
+ // podSelector is a label selector which selects pods. This field follows standard label
+ // selector semantics; if present but empty, it selects all pods.
+ //
+ // If namespaceSelector is also set, then the NetworkPolicyPeer as a whole selects
+ // the pods matching podSelector in the Namespaces selected by NamespaceSelector.
+ // Otherwise it selects the pods matching podSelector in the policy's own namespace.
+ // +optional
+ PodSelector *slim_metav1.LabelSelector `json:"podSelector,omitempty" protobuf:"bytes,1,opt,name=podSelector"`
+
+ // namespaceSelector selects namespaces using cluster-scoped labels. This field follows
+ // standard label selector semantics; if present but empty, it selects all namespaces.
+ //
+ // If podSelector is also set, then the NetworkPolicyPeer as a whole selects
+ // the pods matching podSelector in the namespaces selected by namespaceSelector.
+ // Otherwise it selects all pods in the namespaces selected by namespaceSelector.
+ // +optional
+ NamespaceSelector *slim_metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,2,opt,name=namespaceSelector"`
+
+ // ipBlock defines policy on a particular IPBlock. If this field is set then
+ // neither of the other fields can be.
+ // +optional
+ IPBlock *IPBlock `json:"ipBlock,omitempty" protobuf:"bytes,3,rep,name=ipBlock"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// NetworkPolicyList is a list of NetworkPolicy objects.
+type NetworkPolicyList struct {
+ slim_metav1.TypeMeta `json:",inline"`
+
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ slim_metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // items is a list of schema objects.
+ Items []NetworkPolicy `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Ingress is a collection of rules that allow inbound connections to reach the
+// endpoints defined by a backend. An Ingress can be configured to give services
+// externally-reachable urls, load balance traffic, terminate SSL, offer name
+// based virtual hosting etc.
+type Ingress struct {
+ slim_metav1.TypeMeta `json:",inline"`
+
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ slim_metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // spec is the desired state of the Ingress.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ Spec IngressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // status is the current state of the Ingress.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ Status IngressStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// IngressList is a collection of Ingress.
+type IngressList struct {
+ slim_metav1.TypeMeta `json:",inline"`
+
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ slim_metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // items is the list of Ingress.
+ Items []Ingress `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// IngressSpec describes the Ingress the user wishes to exist.
+type IngressSpec struct {
+ // ingressClassName is the name of an IngressClass cluster resource. Ingress
+ // controller implementations use this field to know whether they should be
+ // serving this Ingress resource, by a transitive connection
+ // (controller -> IngressClass -> Ingress resource). Although the
+ // `kubernetes.io/ingress.class` annotation (simple constant name) was never
+ // formally defined, it was widely supported by Ingress controllers to create
+ // a direct binding between Ingress controller and Ingress resources. Newly
+ // created Ingress resources should prefer using the field. However, even
+ // though the annotation is officially deprecated, for backwards compatibility
+ // reasons, ingress controllers should still honor that annotation if present.
+ // +optional
+ IngressClassName *string `json:"ingressClassName,omitempty" protobuf:"bytes,4,opt,name=ingressClassName"`
+
+ // defaultBackend is the backend that should handle requests that don't
+ // match any rule. If Rules are not specified, DefaultBackend must be specified.
+ // If DefaultBackend is not set, the handling of requests that do not match any
+ // of the rules will be up to the Ingress controller.
+ // +optional
+ DefaultBackend *IngressBackend `json:"defaultBackend,omitempty" protobuf:"bytes,1,opt,name=defaultBackend"`
+
+ // tls represents the TLS configuration. Currently the Ingress only supports a
+ // single TLS port, 443. If multiple members of this list specify different hosts,
+ // they will be multiplexed on the same port according to the hostname specified
+ // through the SNI TLS extension, if the ingress controller fulfilling the
+ // ingress supports SNI.
+ // +listType=atomic
+ // +optional
+ TLS []IngressTLS `json:"tls,omitempty" protobuf:"bytes,2,rep,name=tls"`
+
+ // rules is a list of host rules used to configure the Ingress. If unspecified,
+ // or no rule matches, all traffic is sent to the default backend.
+ // +listType=atomic
+ // +optional
+ Rules []IngressRule `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"`
+}
+
+// IngressTLS describes the transport layer security associated with an ingress.
+type IngressTLS struct {
+ // hosts is a list of hosts included in the TLS certificate. The values in
+ // this list must match the name/s used in the tlsSecret. Defaults to the
+ // wildcard host setting for the loadbalancer controller fulfilling this
+ // Ingress, if left unspecified.
+ // +listType=atomic
+ // +optional
+ Hosts []string `json:"hosts,omitempty" protobuf:"bytes,1,rep,name=hosts"`
+
+ // secretName is the name of the secret used to terminate TLS traffic on
+ // port 443. Field is left optional to allow TLS routing based on SNI
+ // hostname alone. If the SNI host in a listener conflicts with the "Host"
+ // header field used by an IngressRule, the SNI host is used for termination
+ // and value of the "Host" header is used for routing.
+ // +optional
+ SecretName string `json:"secretName,omitempty" protobuf:"bytes,2,opt,name=secretName"`
+}
+
+// IngressStatus describe the current state of the Ingress.
+type IngressStatus struct {
+ // loadBalancer contains the current status of the load-balancer.
+ // +optional
+ LoadBalancer IngressLoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"`
+}
+
+// IngressLoadBalancerStatus represents the status of a load-balancer.
+type IngressLoadBalancerStatus struct {
+ // ingress is a list containing ingress points for the load-balancer.
+ // +optional
+ Ingress []IngressLoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"`
+}
+
+// IngressLoadBalancerIngress represents the status of a load-balancer ingress point.
+type IngressLoadBalancerIngress struct {
+ // ip is set for load-balancer ingress points that are IP based.
+ // +optional
+ IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
+
+ // hostname is set for load-balancer ingress points that are DNS based.
+ // +optional
+ Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"`
+
+ // ports provides information about the ports exposed by this LoadBalancer.
+ // +listType=atomic
+ // +optional
+ Ports []IngressPortStatus `json:"ports,omitempty" protobuf:"bytes,4,rep,name=ports"`
+}
+
+// IngressPortStatus represents the error condition of a service port
+type IngressPortStatus struct {
+ // port is the port number of the ingress port.
+ Port int32 `json:"port" protobuf:"varint,1,opt,name=port"`
+
+ // protocol is the protocol of the ingress port.
+ // The supported values are: "TCP", "UDP", "SCTP"
+ Protocol v1.Protocol `json:"protocol" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"`
+
+ // error is to record the problem with the service port
+ // The format of the error shall comply with the following rules:
+ // - built-in error values shall be specified in this file and those shall use
+ // CamelCase names
+ // - cloud provider specific error values must have names that comply with the
+ // format foo.example.com/CamelCase.
+ // ---
+ // The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+ // +optional
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$`
+ // +kubebuilder:validation:MaxLength=316
+ Error *string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"`
+}
+
+// IngressRule represents the rules mapping the paths under a specified host to
+// the related backend services. Incoming requests are first evaluated for a host
+// match, then routed to the backend associated with the matching IngressRuleValue.
+type IngressRule struct {
+ // host is the fully qualified domain name of a network host, as defined by RFC 3986.
+ // Note the following deviations from the "host" part of the
+ // URI as defined in RFC 3986:
+ // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to
+ // the IP in the Spec of the parent Ingress.
+ // 2. The `:` delimiter is not respected because ports are not allowed.
+ // Currently the port of an Ingress is implicitly :80 for http and
+ // :443 for https.
+ // Both these may change in the future.
+ // Incoming requests are matched against the host before the
+ // IngressRuleValue. If the host is unspecified, the Ingress routes all
+ // traffic based on the specified IngressRuleValue.
+ //
+ // host can be "precise" which is a domain name without the terminating dot of
+ // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name
+ // prefixed with a single wildcard label (e.g. "*.foo.com").
+ // The wildcard character '*' must appear by itself as the first DNS label and
+ // matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*").
+ // Requests will be matched against the Host field in the following way:
+ // 1. If host is precise, the request matches this rule if the http host header is equal to Host.
+ // 2. If host is a wildcard, then the request matches this rule if the http host header
+ // is to equal to the suffix (removing the first label) of the wildcard rule.
+ // +optional
+ Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"`
+ // IngressRuleValue represents a rule to route requests for this IngressRule.
+ // If unspecified, the rule defaults to a http catch-all. Whether that sends
+ // just traffic matching the host to the default backend or all traffic to the
+ // default backend, is left to the controller fulfilling the Ingress. Http is
+ // currently the only supported IngressRuleValue.
+ // +optional
+ IngressRuleValue `json:",inline,omitempty" protobuf:"bytes,2,opt,name=ingressRuleValue"`
+}
+
+// IngressRuleValue represents a rule to apply against incoming requests. If the
+// rule is satisfied, the request is routed to the specified backend. Currently
+// mixing different types of rules in a single Ingress is disallowed, so exactly
+// one of the following must be set.
+type IngressRuleValue struct {
+ // +optional
+ HTTP *HTTPIngressRuleValue `json:"http,omitempty" protobuf:"bytes,1,opt,name=http"`
+}
+
+// HTTPIngressRuleValue is a list of http selectors pointing to backends.
+// In the example: http:///? -> backend where
+// where parts of the url correspond to RFC 3986, this resource will be used
+// to match against everything after the last '/' and before the first '?'
+// or '#'.
+type HTTPIngressRuleValue struct {
+ // paths is a collection of paths that map requests to backends.
+ // +listType=atomic
+ Paths []HTTPIngressPath `json:"paths" protobuf:"bytes,1,rep,name=paths"`
+}
+
+// PathType represents the type of path referred to by a HTTPIngressPath.
+// +enum
+type PathType string
+
+const (
+ // PathTypeExact matches the URL path exactly and with case sensitivity.
+ PathTypeExact = PathType("Exact")
+
+ // PathTypePrefix matches based on a URL path prefix split by '/'. Matching
+ // is case sensitive and done on a path element by element basis. A path
+ // element refers to the list of labels in the path split by the '/'
+ // separator. A request is a match for path p if every p is an element-wise
+ // prefix of p of the request path. Note that if the last element of the
+ // path is a substring of the last element in request path, it is not a
+ // match (e.g. /foo/bar matches /foo/bar/baz, but does not match
+ // /foo/barbaz). If multiple matching paths exist in an Ingress spec, the
+ // longest matching path is given priority.
+ // Examples:
+ // - /foo/bar does not match requests to /foo/barbaz
+ // - /foo/bar matches request to /foo/bar and /foo/bar/baz
+ // - /foo and /foo/ both match requests to /foo and /foo/. If both paths are
+ // present in an Ingress spec, the longest matching path (/foo/) is given
+ // priority.
+ PathTypePrefix = PathType("Prefix")
+
+ // PathTypeImplementationSpecific matching is up to the IngressClass.
+ // Implementations can treat this as a separate PathType or treat it
+ // identically to Prefix or Exact path types.
+ PathTypeImplementationSpecific = PathType("ImplementationSpecific")
+)
+
+// HTTPIngressPath associates a path with a backend. Incoming urls matching the
+// path are forwarded to the backend.
+type HTTPIngressPath struct {
+ // path is matched against the path of an incoming request. Currently it can
+ // contain characters disallowed from the conventional "path" part of a URL
+ // as defined by RFC 3986. Paths must begin with a '/' and must be present
+ // when using PathType with value "Exact" or "Prefix".
+ // +optional
+ Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
+
+ // pathType determines the interpretation of the path matching. PathType can
+ // be one of the following values:
+ // * Exact: Matches the URL path exactly.
+ // * Prefix: Matches based on a URL path prefix split by '/'. Matching is
+ // done on a path element by element basis. A path element refers is the
+ // list of labels in the path split by the '/' separator. A request is a
+ // match for path p if every p is an element-wise prefix of p of the
+ // request path. Note that if the last element of the path is a substring
+ // of the last element in request path, it is not a match (e.g. /foo/bar
+ // matches /foo/bar/baz, but does not match /foo/barbaz).
+ // * ImplementationSpecific: Interpretation of the Path matching is up to
+ // the IngressClass. Implementations can treat this as a separate PathType
+ // or treat it identically to Prefix or Exact path types.
+ // Implementations are required to support all path types.
+ PathType *PathType `json:"pathType" protobuf:"bytes,3,opt,name=pathType"`
+
+ // backend defines the referenced service endpoint to which the traffic
+ // will be forwarded to.
+ Backend IngressBackend `json:"backend" protobuf:"bytes,2,opt,name=backend"`
+}
+
+// IngressBackend describes all endpoints for a given service and port.
+type IngressBackend struct {
+ // service references a service as a backend.
+ // This is a mutually exclusive setting with "Resource".
+ // +optional
+ Service *IngressServiceBackend `json:"service,omitempty" protobuf:"bytes,4,opt,name=service"`
+}
+
+// IngressServiceBackend references a Kubernetes Service as a Backend.
+type IngressServiceBackend struct {
+ // name is the referenced service. The service must exist in
+ // the same namespace as the Ingress object.
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+
+ // port of the referenced service. A port name or port number
+ // is required for a IngressServiceBackend.
+ Port ServiceBackendPort `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"`
+}
+
+// ServiceBackendPort is the service port being referenced.
+type ServiceBackendPort struct {
+ // name is the name of the port on the Service.
+ // This is a mutually exclusive setting with "Number".
+ // +optional
+ Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+
+ // number is the numerical port number (e.g. 80) on the Service.
+ // This is a mutually exclusive setting with "Name".
+ // +optional
+ Number int32 `json:"number,omitempty" protobuf:"bytes,2,opt,name=number"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// IngressClass represents the class of the Ingress, referenced by the Ingress
+// Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be
+// used to indicate that an IngressClass should be considered default. When a
+// single IngressClass resource has this annotation set to true, new Ingress
+// resources without a class specified will be assigned this default class.
+type IngressClass struct {
+ slim_metav1.TypeMeta `json:",inline"`
+
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ slim_metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // spec is the desired state of the IngressClass.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ Spec IngressClassSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// IngressClassSpec provides information about the class of an Ingress.
+type IngressClassSpec struct {
+ // controller refers to the name of the controller that should handle this
+ // class. This allows for different "flavors" that are controlled by the
+ // same controller. For example, you may have different parameters for the
+ // same implementing controller. This should be specified as a
+ // domain-prefixed path no more than 250 characters in length, e.g.
+ // "acme.io/ingress-controller". This field is immutable.
+ Controller string `json:"controller,omitempty" protobuf:"bytes,1,opt,name=controller"`
+
+ // parameters is a link to a custom resource containing additional
+ // configuration for the controller. This is optional if the controller does
+ // not require extra parameters.
+ // +optional
+ Parameters *IngressClassParametersReference `json:"parameters,omitempty" protobuf:"bytes,2,opt,name=parameters"`
+}
+
+const (
+ // IngressClassParametersReferenceScopeNamespace indicates that the
+ // referenced Parameters resource is namespace-scoped.
+ IngressClassParametersReferenceScopeNamespace = "Namespace"
+ // IngressClassParametersReferenceScopeCluster indicates that the
+ // referenced Parameters resource is cluster-scoped.
+ IngressClassParametersReferenceScopeCluster = "Cluster"
+)
+
+// IngressClassParametersReference identifies an API object. This can be used
+// to specify a cluster or namespace-scoped resource.
+type IngressClassParametersReference struct {
+ // apiGroup is the group for the resource being referenced. If APIGroup is
+ // not specified, the specified Kind must be in the core API group. For any
+ // other third-party types, APIGroup is required.
+ // +optional
+ APIGroup *string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=aPIGroup"`
+
+ // kind is the type of resource being referenced.
+ Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"`
+
+ // name is the name of resource being referenced.
+ Name string `json:"name" protobuf:"bytes,3,opt,name=name"`
+
+ // scope represents if this refers to a cluster or namespace scoped resource.
+ // This may be set to "Cluster" (default) or "Namespace".
+ // +optional
+ Scope *string `json:"scope" protobuf:"bytes,4,opt,name=scope"`
+
+ // namespace is the namespace of the resource being referenced. This field is
+ // required when scope is set to "Namespace" and must be unset when scope is set to
+ // "Cluster".
+ // +optional
+ Namespace *string `json:"namespace,omitempty" protobuf:"bytes,5,opt,name=namespace"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// IngressClassList is a collection of IngressClasses.
+type IngressClassList struct {
+ slim_metav1.TypeMeta `json:",inline"`
+
+ // Standard list metadata.
+ // +optional
+ slim_metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // items is the list of IngressClasses.
+ Items []IngressClass `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/well_known_annotations.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/well_known_annotations.go
new file mode 100644
index 000000000..136d2456a
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/well_known_annotations.go
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Copyright 2020 The Kubernetes Authors.
+
+package v1
+
+const (
+ // AnnotationIsDefaultIngressClass can be used to indicate that an
+ // IngressClass should be considered default. When a single IngressClass
+ // resource has this annotation set to true, new Ingress resources without a
+ // class specified will be assigned this default class.
+ AnnotationIsDefaultIngressClass = "ingressclass.kubernetes.io/is-default-class"
+)
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..251c53bd4
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/zz_generated.deepcopy.go
@@ -0,0 +1,710 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
+ metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
+ intstr "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPIngressPath) DeepCopyInto(out *HTTPIngressPath) {
+ *out = *in
+ if in.PathType != nil {
+ in, out := &in.PathType, &out.PathType
+ *out = new(PathType)
+ **out = **in
+ }
+ in.Backend.DeepCopyInto(&out.Backend)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPIngressPath.
+func (in *HTTPIngressPath) DeepCopy() *HTTPIngressPath {
+ if in == nil {
+ return nil
+ }
+ out := new(HTTPIngressPath)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HTTPIngressRuleValue) DeepCopyInto(out *HTTPIngressRuleValue) {
+ *out = *in
+ if in.Paths != nil {
+ in, out := &in.Paths, &out.Paths
+ *out = make([]HTTPIngressPath, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPIngressRuleValue.
+func (in *HTTPIngressRuleValue) DeepCopy() *HTTPIngressRuleValue {
+ if in == nil {
+ return nil
+ }
+ out := new(HTTPIngressRuleValue)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IPBlock) DeepCopyInto(out *IPBlock) {
+ *out = *in
+ if in.Except != nil {
+ in, out := &in.Except, &out.Except
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPBlock.
+func (in *IPBlock) DeepCopy() *IPBlock {
+ if in == nil {
+ return nil
+ }
+ out := new(IPBlock)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Ingress) DeepCopyInto(out *Ingress) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ingress.
+func (in *Ingress) DeepCopy() *Ingress {
+ if in == nil {
+ return nil
+ }
+ out := new(Ingress)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Ingress) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressBackend) DeepCopyInto(out *IngressBackend) {
+ *out = *in
+ if in.Service != nil {
+ in, out := &in.Service, &out.Service
+ *out = new(IngressServiceBackend)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressBackend.
+func (in *IngressBackend) DeepCopy() *IngressBackend {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressBackend)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressClass) DeepCopyInto(out *IngressClass) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressClass.
+func (in *IngressClass) DeepCopy() *IngressClass {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressClass)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *IngressClass) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressClassList) DeepCopyInto(out *IngressClassList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]IngressClass, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressClassList.
+func (in *IngressClassList) DeepCopy() *IngressClassList {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressClassList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *IngressClassList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressClassParametersReference) DeepCopyInto(out *IngressClassParametersReference) {
+ *out = *in
+ if in.APIGroup != nil {
+ in, out := &in.APIGroup, &out.APIGroup
+ *out = new(string)
+ **out = **in
+ }
+ if in.Scope != nil {
+ in, out := &in.Scope, &out.Scope
+ *out = new(string)
+ **out = **in
+ }
+ if in.Namespace != nil {
+ in, out := &in.Namespace, &out.Namespace
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressClassParametersReference.
+func (in *IngressClassParametersReference) DeepCopy() *IngressClassParametersReference {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressClassParametersReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressClassSpec) DeepCopyInto(out *IngressClassSpec) {
+ *out = *in
+ if in.Parameters != nil {
+ in, out := &in.Parameters, &out.Parameters
+ *out = new(IngressClassParametersReference)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressClassSpec.
+func (in *IngressClassSpec) DeepCopy() *IngressClassSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressClassSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressList) DeepCopyInto(out *IngressList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Ingress, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressList.
+func (in *IngressList) DeepCopy() *IngressList {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *IngressList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressLoadBalancerIngress) DeepCopyInto(out *IngressLoadBalancerIngress) {
+ *out = *in
+ if in.Ports != nil {
+ in, out := &in.Ports, &out.Ports
+ *out = make([]IngressPortStatus, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressLoadBalancerIngress.
+func (in *IngressLoadBalancerIngress) DeepCopy() *IngressLoadBalancerIngress {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressLoadBalancerIngress)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressLoadBalancerStatus) DeepCopyInto(out *IngressLoadBalancerStatus) {
+ *out = *in
+ if in.Ingress != nil {
+ in, out := &in.Ingress, &out.Ingress
+ *out = make([]IngressLoadBalancerIngress, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressLoadBalancerStatus.
+func (in *IngressLoadBalancerStatus) DeepCopy() *IngressLoadBalancerStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressLoadBalancerStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressPortStatus) DeepCopyInto(out *IngressPortStatus) {
+ *out = *in
+ if in.Error != nil {
+ in, out := &in.Error, &out.Error
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressPortStatus.
+func (in *IngressPortStatus) DeepCopy() *IngressPortStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressPortStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressRule) DeepCopyInto(out *IngressRule) {
+ *out = *in
+ in.IngressRuleValue.DeepCopyInto(&out.IngressRuleValue)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRule.
+func (in *IngressRule) DeepCopy() *IngressRule {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressRule)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressRuleValue) DeepCopyInto(out *IngressRuleValue) {
+ *out = *in
+ if in.HTTP != nil {
+ in, out := &in.HTTP, &out.HTTP
+ *out = new(HTTPIngressRuleValue)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRuleValue.
+func (in *IngressRuleValue) DeepCopy() *IngressRuleValue {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressRuleValue)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressServiceBackend) DeepCopyInto(out *IngressServiceBackend) {
+ *out = *in
+ out.Port = in.Port
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressServiceBackend.
+func (in *IngressServiceBackend) DeepCopy() *IngressServiceBackend {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressServiceBackend)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressSpec) DeepCopyInto(out *IngressSpec) {
+ *out = *in
+ if in.IngressClassName != nil {
+ in, out := &in.IngressClassName, &out.IngressClassName
+ *out = new(string)
+ **out = **in
+ }
+ if in.DefaultBackend != nil {
+ in, out := &in.DefaultBackend, &out.DefaultBackend
+ *out = new(IngressBackend)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.TLS != nil {
+ in, out := &in.TLS, &out.TLS
+ *out = make([]IngressTLS, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Rules != nil {
+ in, out := &in.Rules, &out.Rules
+ *out = make([]IngressRule, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressSpec.
+func (in *IngressSpec) DeepCopy() *IngressSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressStatus) DeepCopyInto(out *IngressStatus) {
+ *out = *in
+ in.LoadBalancer.DeepCopyInto(&out.LoadBalancer)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressStatus.
+func (in *IngressStatus) DeepCopy() *IngressStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressTLS) DeepCopyInto(out *IngressTLS) {
+ *out = *in
+ if in.Hosts != nil {
+ in, out := &in.Hosts, &out.Hosts
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressTLS.
+func (in *IngressTLS) DeepCopy() *IngressTLS {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressTLS)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkPolicy) DeepCopyInto(out *NetworkPolicy) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicy.
+func (in *NetworkPolicy) DeepCopy() *NetworkPolicy {
+ if in == nil {
+ return nil
+ }
+ out := new(NetworkPolicy)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NetworkPolicy) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkPolicyEgressRule) DeepCopyInto(out *NetworkPolicyEgressRule) {
+ *out = *in
+ if in.Ports != nil {
+ in, out := &in.Ports, &out.Ports
+ *out = make([]NetworkPolicyPort, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.To != nil {
+ in, out := &in.To, &out.To
+ *out = make([]NetworkPolicyPeer, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyEgressRule.
+func (in *NetworkPolicyEgressRule) DeepCopy() *NetworkPolicyEgressRule {
+ if in == nil {
+ return nil
+ }
+ out := new(NetworkPolicyEgressRule)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkPolicyIngressRule) DeepCopyInto(out *NetworkPolicyIngressRule) {
+ *out = *in
+ if in.Ports != nil {
+ in, out := &in.Ports, &out.Ports
+ *out = make([]NetworkPolicyPort, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.From != nil {
+ in, out := &in.From, &out.From
+ *out = make([]NetworkPolicyPeer, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyIngressRule.
+func (in *NetworkPolicyIngressRule) DeepCopy() *NetworkPolicyIngressRule {
+ if in == nil {
+ return nil
+ }
+ out := new(NetworkPolicyIngressRule)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkPolicyList) DeepCopyInto(out *NetworkPolicyList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]NetworkPolicy, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyList.
+func (in *NetworkPolicyList) DeepCopy() *NetworkPolicyList {
+ if in == nil {
+ return nil
+ }
+ out := new(NetworkPolicyList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *NetworkPolicyList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkPolicyPeer) DeepCopyInto(out *NetworkPolicyPeer) {
+ *out = *in
+ if in.PodSelector != nil {
+ in, out := &in.PodSelector, &out.PodSelector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.NamespaceSelector != nil {
+ in, out := &in.NamespaceSelector, &out.NamespaceSelector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.IPBlock != nil {
+ in, out := &in.IPBlock, &out.IPBlock
+ *out = new(IPBlock)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyPeer.
+func (in *NetworkPolicyPeer) DeepCopy() *NetworkPolicyPeer {
+ if in == nil {
+ return nil
+ }
+ out := new(NetworkPolicyPeer)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkPolicyPort) DeepCopyInto(out *NetworkPolicyPort) {
+ *out = *in
+ if in.Protocol != nil {
+ in, out := &in.Protocol, &out.Protocol
+ *out = new(corev1.Protocol)
+ **out = **in
+ }
+ if in.Port != nil {
+ in, out := &in.Port, &out.Port
+ *out = new(intstr.IntOrString)
+ **out = **in
+ }
+ if in.EndPort != nil {
+ in, out := &in.EndPort, &out.EndPort
+ *out = new(int32)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyPort.
+func (in *NetworkPolicyPort) DeepCopy() *NetworkPolicyPort {
+ if in == nil {
+ return nil
+ }
+ out := new(NetworkPolicyPort)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkPolicySpec) DeepCopyInto(out *NetworkPolicySpec) {
+ *out = *in
+ in.PodSelector.DeepCopyInto(&out.PodSelector)
+ if in.Ingress != nil {
+ in, out := &in.Ingress, &out.Ingress
+ *out = make([]NetworkPolicyIngressRule, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Egress != nil {
+ in, out := &in.Egress, &out.Egress
+ *out = make([]NetworkPolicyEgressRule, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.PolicyTypes != nil {
+ in, out := &in.PolicyTypes, &out.PolicyTypes
+ *out = make([]PolicyType, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicySpec.
+func (in *NetworkPolicySpec) DeepCopy() *NetworkPolicySpec {
+ if in == nil {
+ return nil
+ }
+ out := new(NetworkPolicySpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceBackendPort) DeepCopyInto(out *ServiceBackendPort) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceBackendPort.
+func (in *ServiceBackendPort) DeepCopy() *ServiceBackendPort {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceBackendPort)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/zz_generated.deepequal.go
new file mode 100644
index 000000000..e09e0d639
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1/zz_generated.deepequal.go
@@ -0,0 +1,824 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by deepequal-gen. DO NOT EDIT.
+
+package v1
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *HTTPIngressPath) DeepEqual(other *HTTPIngressPath) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Path != other.Path {
+ return false
+ }
+ if (in.PathType == nil) != (other.PathType == nil) {
+ return false
+ } else if in.PathType != nil {
+ if *in.PathType != *other.PathType {
+ return false
+ }
+ }
+
+ if !in.Backend.DeepEqual(&other.Backend) {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *HTTPIngressRuleValue) DeepEqual(other *HTTPIngressRuleValue) bool {
+ if other == nil {
+ return false
+ }
+
+ if ((in.Paths != nil) && (other.Paths != nil)) || ((in.Paths == nil) != (other.Paths == nil)) {
+ in, other := &in.Paths, &other.Paths
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *IPBlock) DeepEqual(other *IPBlock) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.CIDR != other.CIDR {
+ return false
+ }
+ if ((in.Except != nil) && (other.Except != nil)) || ((in.Except == nil) != (other.Except == nil)) {
+ in, other := &in.Except, &other.Except
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if inElement != (*other)[i] {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *Ingress) DeepEqual(other *Ingress) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.TypeMeta != other.TypeMeta {
+ return false
+ }
+
+ if !in.ObjectMeta.DeepEqual(&other.ObjectMeta) {
+ return false
+ }
+
+ if !in.Spec.DeepEqual(&other.Spec) {
+ return false
+ }
+
+ if !in.Status.DeepEqual(&other.Status) {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *IngressBackend) DeepEqual(other *IngressBackend) bool {
+ if other == nil {
+ return false
+ }
+
+ if (in.Service == nil) != (other.Service == nil) {
+ return false
+ } else if in.Service != nil {
+ if !in.Service.DeepEqual(other.Service) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *IngressClass) DeepEqual(other *IngressClass) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.TypeMeta != other.TypeMeta {
+ return false
+ }
+
+ if !in.ObjectMeta.DeepEqual(&other.ObjectMeta) {
+ return false
+ }
+
+ if !in.Spec.DeepEqual(&other.Spec) {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *IngressClassList) DeepEqual(other *IngressClassList) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.TypeMeta != other.TypeMeta {
+ return false
+ }
+
+ if !in.ListMeta.DeepEqual(&other.ListMeta) {
+ return false
+ }
+
+ if ((in.Items != nil) && (other.Items != nil)) || ((in.Items == nil) != (other.Items == nil)) {
+ in, other := &in.Items, &other.Items
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *IngressClassParametersReference) DeepEqual(other *IngressClassParametersReference) bool {
+ if other == nil {
+ return false
+ }
+
+ if (in.APIGroup == nil) != (other.APIGroup == nil) {
+ return false
+ } else if in.APIGroup != nil {
+ if *in.APIGroup != *other.APIGroup {
+ return false
+ }
+ }
+
+ if in.Kind != other.Kind {
+ return false
+ }
+ if in.Name != other.Name {
+ return false
+ }
+ if (in.Scope == nil) != (other.Scope == nil) {
+ return false
+ } else if in.Scope != nil {
+ if *in.Scope != *other.Scope {
+ return false
+ }
+ }
+
+ if (in.Namespace == nil) != (other.Namespace == nil) {
+ return false
+ } else if in.Namespace != nil {
+ if *in.Namespace != *other.Namespace {
+ return false
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *IngressClassSpec) DeepEqual(other *IngressClassSpec) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Controller != other.Controller {
+ return false
+ }
+ if (in.Parameters == nil) != (other.Parameters == nil) {
+ return false
+ } else if in.Parameters != nil {
+ if !in.Parameters.DeepEqual(other.Parameters) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *IngressList) DeepEqual(other *IngressList) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.TypeMeta != other.TypeMeta {
+ return false
+ }
+
+ if !in.ListMeta.DeepEqual(&other.ListMeta) {
+ return false
+ }
+
+ if ((in.Items != nil) && (other.Items != nil)) || ((in.Items == nil) != (other.Items == nil)) {
+ in, other := &in.Items, &other.Items
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *IngressLoadBalancerIngress) DeepEqual(other *IngressLoadBalancerIngress) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.IP != other.IP {
+ return false
+ }
+ if in.Hostname != other.Hostname {
+ return false
+ }
+ if ((in.Ports != nil) && (other.Ports != nil)) || ((in.Ports == nil) != (other.Ports == nil)) {
+ in, other := &in.Ports, &other.Ports
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *IngressLoadBalancerStatus) DeepEqual(other *IngressLoadBalancerStatus) bool {
+ if other == nil {
+ return false
+ }
+
+ if ((in.Ingress != nil) && (other.Ingress != nil)) || ((in.Ingress == nil) != (other.Ingress == nil)) {
+ in, other := &in.Ingress, &other.Ingress
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *IngressPortStatus) DeepEqual(other *IngressPortStatus) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Port != other.Port {
+ return false
+ }
+ if in.Protocol != other.Protocol {
+ return false
+ }
+ if (in.Error == nil) != (other.Error == nil) {
+ return false
+ } else if in.Error != nil {
+ if *in.Error != *other.Error {
+ return false
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *IngressRule) DeepEqual(other *IngressRule) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Host != other.Host {
+ return false
+ }
+ if !in.IngressRuleValue.DeepEqual(&other.IngressRuleValue) {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *IngressRuleValue) DeepEqual(other *IngressRuleValue) bool {
+ if other == nil {
+ return false
+ }
+
+ if (in.HTTP == nil) != (other.HTTP == nil) {
+ return false
+ } else if in.HTTP != nil {
+ if !in.HTTP.DeepEqual(other.HTTP) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *IngressServiceBackend) DeepEqual(other *IngressServiceBackend) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Name != other.Name {
+ return false
+ }
+ if in.Port != other.Port {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *IngressSpec) DeepEqual(other *IngressSpec) bool {
+ if other == nil {
+ return false
+ }
+
+ if (in.IngressClassName == nil) != (other.IngressClassName == nil) {
+ return false
+ } else if in.IngressClassName != nil {
+ if *in.IngressClassName != *other.IngressClassName {
+ return false
+ }
+ }
+
+ if (in.DefaultBackend == nil) != (other.DefaultBackend == nil) {
+ return false
+ } else if in.DefaultBackend != nil {
+ if !in.DefaultBackend.DeepEqual(other.DefaultBackend) {
+ return false
+ }
+ }
+
+ if ((in.TLS != nil) && (other.TLS != nil)) || ((in.TLS == nil) != (other.TLS == nil)) {
+ in, other := &in.TLS, &other.TLS
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ if ((in.Rules != nil) && (other.Rules != nil)) || ((in.Rules == nil) != (other.Rules == nil)) {
+ in, other := &in.Rules, &other.Rules
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *IngressStatus) DeepEqual(other *IngressStatus) bool {
+ if other == nil {
+ return false
+ }
+
+ if !in.LoadBalancer.DeepEqual(&other.LoadBalancer) {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *IngressTLS) DeepEqual(other *IngressTLS) bool {
+ if other == nil {
+ return false
+ }
+
+ if ((in.Hosts != nil) && (other.Hosts != nil)) || ((in.Hosts == nil) != (other.Hosts == nil)) {
+ in, other := &in.Hosts, &other.Hosts
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if inElement != (*other)[i] {
+ return false
+ }
+ }
+ }
+ }
+
+ if in.SecretName != other.SecretName {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *NetworkPolicy) DeepEqual(other *NetworkPolicy) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.TypeMeta != other.TypeMeta {
+ return false
+ }
+
+ if !in.ObjectMeta.DeepEqual(&other.ObjectMeta) {
+ return false
+ }
+
+ if !in.Spec.DeepEqual(&other.Spec) {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *NetworkPolicyEgressRule) DeepEqual(other *NetworkPolicyEgressRule) bool {
+ if other == nil {
+ return false
+ }
+
+ if ((in.Ports != nil) && (other.Ports != nil)) || ((in.Ports == nil) != (other.Ports == nil)) {
+ in, other := &in.Ports, &other.Ports
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ if ((in.To != nil) && (other.To != nil)) || ((in.To == nil) != (other.To == nil)) {
+ in, other := &in.To, &other.To
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *NetworkPolicyIngressRule) DeepEqual(other *NetworkPolicyIngressRule) bool {
+ if other == nil {
+ return false
+ }
+
+ if ((in.Ports != nil) && (other.Ports != nil)) || ((in.Ports == nil) != (other.Ports == nil)) {
+ in, other := &in.Ports, &other.Ports
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ if ((in.From != nil) && (other.From != nil)) || ((in.From == nil) != (other.From == nil)) {
+ in, other := &in.From, &other.From
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *NetworkPolicyList) DeepEqual(other *NetworkPolicyList) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.TypeMeta != other.TypeMeta {
+ return false
+ }
+
+ if !in.ListMeta.DeepEqual(&other.ListMeta) {
+ return false
+ }
+
+ if ((in.Items != nil) && (other.Items != nil)) || ((in.Items == nil) != (other.Items == nil)) {
+ in, other := &in.Items, &other.Items
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *NetworkPolicyPeer) DeepEqual(other *NetworkPolicyPeer) bool {
+ if other == nil {
+ return false
+ }
+
+ if (in.PodSelector == nil) != (other.PodSelector == nil) {
+ return false
+ } else if in.PodSelector != nil {
+ if !in.PodSelector.DeepEqual(other.PodSelector) {
+ return false
+ }
+ }
+
+ if (in.NamespaceSelector == nil) != (other.NamespaceSelector == nil) {
+ return false
+ } else if in.NamespaceSelector != nil {
+ if !in.NamespaceSelector.DeepEqual(other.NamespaceSelector) {
+ return false
+ }
+ }
+
+ if (in.IPBlock == nil) != (other.IPBlock == nil) {
+ return false
+ } else if in.IPBlock != nil {
+ if !in.IPBlock.DeepEqual(other.IPBlock) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *NetworkPolicyPort) DeepEqual(other *NetworkPolicyPort) bool {
+ if other == nil {
+ return false
+ }
+
+ if (in.Protocol == nil) != (other.Protocol == nil) {
+ return false
+ } else if in.Protocol != nil {
+ if *in.Protocol != *other.Protocol {
+ return false
+ }
+ }
+
+ if (in.Port == nil) != (other.Port == nil) {
+ return false
+ } else if in.Port != nil {
+ if !in.Port.DeepEqual(other.Port) {
+ return false
+ }
+ }
+
+ if (in.EndPort == nil) != (other.EndPort == nil) {
+ return false
+ } else if in.EndPort != nil {
+ if *in.EndPort != *other.EndPort {
+ return false
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *NetworkPolicySpec) DeepEqual(other *NetworkPolicySpec) bool {
+ if other == nil {
+ return false
+ }
+
+ if !in.PodSelector.DeepEqual(&other.PodSelector) {
+ return false
+ }
+
+ if ((in.Ingress != nil) && (other.Ingress != nil)) || ((in.Ingress == nil) != (other.Ingress == nil)) {
+ in, other := &in.Ingress, &other.Ingress
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ if ((in.Egress != nil) && (other.Egress != nil)) || ((in.Egress == nil) != (other.Egress == nil)) {
+ in, other := &in.Egress, &other.Egress
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ if ((in.PolicyTypes != nil) && (other.PolicyTypes != nil)) || ((in.PolicyTypes == nil) != (other.PolicyTypes == nil)) {
+ in, other := &in.PolicyTypes, &other.PolicyTypes
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if inElement != (*other)[i] {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *ServiceBackendPort) DeepEqual(other *ServiceBackendPort) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Name != other.Name {
+ return false
+ }
+ if in.Number != other.Number {
+ return false
+ }
+
+ return true
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/scheme/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/scheme/doc.go
new file mode 100644
index 000000000..ba3451535
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/scheme/doc.go
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package contains the scheme of the automatically generated clientset.
+package scheme
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/scheme/register.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/scheme/register.go
new file mode 100644
index 000000000..cc6ff2709
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/scheme/register.go
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package scheme
+
+import (
+ apiextensionsv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+var Scheme = runtime.NewScheme()
+var Codecs = serializer.NewCodecFactory(Scheme)
+var ParameterCodec = runtime.NewParameterCodec(Scheme)
+var localSchemeBuilder = runtime.SchemeBuilder{
+ apiextensionsv1.AddToScheme,
+}
+
+// AddToScheme adds all types of this clientset into the given scheme. This allows composition
+// of clientsets, like in:
+//
+// import (
+// "k8s.io/client-go/kubernetes"
+// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
+// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+// )
+//
+// kclientset, _ := kubernetes.NewForConfig(c)
+// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+//
+// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
+// correctly.
+var AddToScheme = localSchemeBuilder.AddToScheme
+
+func init() {
+ v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
+ utilruntime.Must(AddToScheme(Scheme))
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/typed/apiextensions/v1/apiextensions_client.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/typed/apiextensions/v1/apiextensions_client.go
new file mode 100644
index 000000000..5f5e04bd9
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/typed/apiextensions/v1/apiextensions_client.go
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "net/http"
+
+ "github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/scheme"
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1"
+ rest "k8s.io/client-go/rest"
+)
+
+type ApiextensionsV1Interface interface {
+ RESTClient() rest.Interface
+ CustomResourceDefinitionsGetter
+}
+
+// ApiextensionsV1Client is used to interact with features provided by the apiextensions.k8s.io group.
+type ApiextensionsV1Client struct {
+ restClient rest.Interface
+}
+
+func (c *ApiextensionsV1Client) CustomResourceDefinitions() CustomResourceDefinitionInterface {
+ return newCustomResourceDefinitions(c)
+}
+
+// NewForConfig creates a new ApiextensionsV1Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
+func NewForConfig(c *rest.Config) (*ApiextensionsV1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ httpClient, err := rest.HTTPClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new ApiextensionsV1Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ApiextensionsV1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := rest.RESTClientForConfigAndClient(&config, h)
+ if err != nil {
+ return nil, err
+ }
+ return &ApiextensionsV1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new ApiextensionsV1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *ApiextensionsV1Client {
+ client, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+// New creates a new ApiextensionsV1Client for the given RESTClient.
+func New(c rest.Interface) *ApiextensionsV1Client {
+ return &ApiextensionsV1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+ gv := v1.SchemeGroupVersion
+ config.GroupVersion = &gv
+ config.APIPath = "/apis"
+ config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *ApiextensionsV1Client) RESTClient() rest.Interface {
+ if c == nil {
+ return nil
+ }
+ return c.restClient
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/typed/apiextensions/v1/customresourcedefinition.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/typed/apiextensions/v1/customresourcedefinition.go
new file mode 100644
index 000000000..d251544ef
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/typed/apiextensions/v1/customresourcedefinition.go
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ "time"
+
+ scheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/scheme"
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// CustomResourceDefinitionsGetter has a method to return a CustomResourceDefinitionInterface.
+// A group's client should implement this interface.
+type CustomResourceDefinitionsGetter interface {
+ CustomResourceDefinitions() CustomResourceDefinitionInterface
+}
+
+// CustomResourceDefinitionInterface has methods to work with CustomResourceDefinition resources.
+type CustomResourceDefinitionInterface interface {
+ Create(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.CreateOptions) (*v1.CustomResourceDefinition, error)
+ Update(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.UpdateOptions) (*v1.CustomResourceDefinition, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CustomResourceDefinition, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.CustomResourceDefinitionList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CustomResourceDefinition, err error)
+ CustomResourceDefinitionExpansion
+}
+
+// customResourceDefinitions implements CustomResourceDefinitionInterface
+type customResourceDefinitions struct {
+ client rest.Interface
+}
+
+// newCustomResourceDefinitions returns a CustomResourceDefinitions
+func newCustomResourceDefinitions(c *ApiextensionsV1Client) *customResourceDefinitions {
+ return &customResourceDefinitions{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the customResourceDefinition, and returns the corresponding customResourceDefinition object, and an error if there is any.
+func (c *customResourceDefinitions) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CustomResourceDefinition, err error) {
+ result = &v1.CustomResourceDefinition{}
+ err = c.client.Get().
+ Resource("customresourcedefinitions").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of CustomResourceDefinitions that match those selectors.
+func (c *customResourceDefinitions) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CustomResourceDefinitionList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.CustomResourceDefinitionList{}
+ err = c.client.Get().
+ Resource("customresourcedefinitions").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested customResourceDefinitions.
+func (c *customResourceDefinitions) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("customresourcedefinitions").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a customResourceDefinition and creates it. Returns the server's representation of the customResourceDefinition, and an error, if there is any.
+func (c *customResourceDefinitions) Create(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.CreateOptions) (result *v1.CustomResourceDefinition, err error) {
+ result = &v1.CustomResourceDefinition{}
+ err = c.client.Post().
+ Resource("customresourcedefinitions").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(customResourceDefinition).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a customResourceDefinition and updates it. Returns the server's representation of the customResourceDefinition, and an error, if there is any.
+func (c *customResourceDefinitions) Update(ctx context.Context, customResourceDefinition *v1.CustomResourceDefinition, opts metav1.UpdateOptions) (result *v1.CustomResourceDefinition, err error) {
+ result = &v1.CustomResourceDefinition{}
+ err = c.client.Put().
+ Resource("customresourcedefinitions").
+ Name(customResourceDefinition.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(customResourceDefinition).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the customResourceDefinition and deletes it. Returns an error if one occurs.
+func (c *customResourceDefinitions) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("customresourcedefinitions").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *customResourceDefinitions) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("customresourcedefinitions").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched customResourceDefinition.
+func (c *customResourceDefinitions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CustomResourceDefinition, err error) {
+ result = &v1.CustomResourceDefinition{}
+ err = c.client.Patch(pt).
+ Resource("customresourcedefinitions").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/typed/apiextensions/v1/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/typed/apiextensions/v1/doc.go
new file mode 100644
index 000000000..50cfbd485
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/typed/apiextensions/v1/doc.go
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/typed/apiextensions/v1/generated_expansion.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/typed/apiextensions/v1/generated_expansion.go
new file mode 100644
index 000000000..2ea7378ea
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/typed/apiextensions/v1/generated_expansion.go
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+type CustomResourceDefinitionExpansion interface{}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-clientset/clientset.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-clientset/clientset.go
new file mode 100644
index 000000000..e15d6ef49
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-clientset/clientset.go
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Copyright The Kubernetes Authors.
+
+package clientset
+
+import (
+ "fmt"
+ "net/http"
+
+ apiextclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
+ apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1"
+ apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1"
+ rest "k8s.io/client-go/rest"
+ flowcontrol "k8s.io/client-go/util/flowcontrol"
+
+ slim_apiextensionsv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apiextensions-client/clientset/versioned/typed/apiextensions/v1"
+)
+
+// Clientset contains the clients for groups. Each group has exactly one
+// version included in a Clientset.
+type Clientset struct {
+ *apiextclientset.Clientset
+ apiextensionsV1beta1 *apiextensionsv1beta1.ApiextensionsV1beta1Client
+ apiextensionsV1 *apiextensionsv1.ApiextensionsV1Client
+}
+
+// ApiextensionsV1 retrieves the ApiextensionsV1Client
+func (c *Clientset) ApiextensionsV1() apiextensionsv1.ApiextensionsV1Interface {
+ return c.apiextensionsV1
+}
+
+// ApiextensionsV1beta1 retrieves the ApiextensionsV1beta1Client
+func (c *Clientset) ApiextensionsV1beta1() apiextensionsv1beta1.ApiextensionsV1beta1Interface {
+ return c.apiextensionsV1beta1
+}
+
+// NewForConfigAndClient creates a new Clientset for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+// If config's RateLimiter is not set and QPS and Burst are acceptable,
+// NewForConfigAndClient will generate a rate-limiter in configShallowCopy.
+func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) {
+ configShallowCopy := *c
+ if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
+ if configShallowCopy.Burst <= 0 {
+ return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0")
+ }
+ configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
+ }
+ var cs Clientset
+ var err error
+ cs.Clientset, err = apiextclientset.NewForConfigAndClient(&configShallowCopy, httpClient)
+ if err != nil {
+ return nil, err
+ }
+
+ // Wrap extensionsV1 with our own implementation
+ extensionsV1, err := slim_apiextensionsv1.NewForConfigAndClient(&configShallowCopy, httpClient)
+ if err != nil {
+ return nil, err
+ }
+ cs.apiextensionsV1 = apiextensionsv1.New(extensionsV1.RESTClient())
+
+ return &cs, nil
+}
+
+// NewForConfigOrDie creates a new Clientset for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *Clientset {
+ var cs Clientset
+ cs.Clientset = apiextclientset.NewForConfigOrDie(c)
+
+ // Wrap extensionsV1 with our own implementation
+ cs.apiextensionsV1 = apiextensionsv1.New(slim_apiextensionsv1.NewForConfigOrDie(c).RESTClient())
+
+ return &cs
+}
+
+// New creates a new Clientset for the given RESTClient.
+func New(c rest.Interface) *Clientset {
+ var cs Clientset
+ cs.Clientset = apiextclientset.New(c)
+
+ // Wrap extensionsV1 with our own implementation
+ cs.apiextensionsV1 = apiextensionsv1.New(slim_apiextensionsv1.New(c).RESTClient())
+
+ return &cs
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/doc.go
new file mode 100644
index 000000000..c54b4a560
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/doc.go
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Copyright 2019 The Kubernetes Authors.
+
+// +k8s:deepcopy-gen=package
+// +k8s:protobuf-gen=package
+// +k8s:conversion-gen=k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+// +groupName=apiextensions.k8s.io
+
+// Package v1 is the v1 version of the API.
+package v1
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/generated.pb.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/generated.pb.go
new file mode 100644
index 000000000..c53e29423
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/generated.pb.go
@@ -0,0 +1,568 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/generated.proto
+
+package v1
+
+import (
+ fmt "fmt"
+
+ io "io"
+
+ proto "github.com/gogo/protobuf/proto"
+
+ math "math"
+ math_bits "math/bits"
+ reflect "reflect"
+ strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *CustomResourceDefinition) Reset() { *m = CustomResourceDefinition{} }
+func (*CustomResourceDefinition) ProtoMessage() {}
+func (*CustomResourceDefinition) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ae25e910fba1c55, []int{0}
+}
+func (m *CustomResourceDefinition) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CustomResourceDefinition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *CustomResourceDefinition) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CustomResourceDefinition.Merge(m, src)
+}
+func (m *CustomResourceDefinition) XXX_Size() int {
+ return m.Size()
+}
+func (m *CustomResourceDefinition) XXX_DiscardUnknown() {
+ xxx_messageInfo_CustomResourceDefinition.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CustomResourceDefinition proto.InternalMessageInfo
+
+func (m *CustomResourceDefinitionList) Reset() { *m = CustomResourceDefinitionList{} }
+func (*CustomResourceDefinitionList) ProtoMessage() {}
+func (*CustomResourceDefinitionList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2ae25e910fba1c55, []int{1}
+}
+func (m *CustomResourceDefinitionList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CustomResourceDefinitionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *CustomResourceDefinitionList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CustomResourceDefinitionList.Merge(m, src)
+}
+func (m *CustomResourceDefinitionList) XXX_Size() int {
+ return m.Size()
+}
+func (m *CustomResourceDefinitionList) XXX_DiscardUnknown() {
+ xxx_messageInfo_CustomResourceDefinitionList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CustomResourceDefinitionList proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*CustomResourceDefinition)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.apiextensions.v1.CustomResourceDefinition")
+ proto.RegisterType((*CustomResourceDefinitionList)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.apiextensions.v1.CustomResourceDefinitionList")
+}
+
+func init() {
+ proto.RegisterFile("github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/generated.proto", fileDescriptor_2ae25e910fba1c55)
+}
+
+var fileDescriptor_2ae25e910fba1c55 = []byte{
+ // 378 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x93, 0x4f, 0x8b, 0xda, 0x40,
+ 0x18, 0x87, 0x33, 0x16, 0x41, 0x22, 0x85, 0x92, 0x93, 0x48, 0x19, 0xc5, 0x93, 0x97, 0x4e, 0xd0,
+ 0x43, 0xf1, 0x66, 0x49, 0x4b, 0xa1, 0xd0, 0x22, 0x78, 0x6b, 0x6f, 0x63, 0x7c, 0x8d, 0xd3, 0x38,
+ 0x33, 0x21, 0x33, 0x91, 0xdd, 0xcb, 0xb2, 0x9f, 0x40, 0xf6, 0x63, 0x79, 0xf4, 0xe8, 0x49, 0xd6,
+ 0xec, 0x17, 0x59, 0x66, 0xfc, 0xb7, 0xbb, 0xae, 0x2c, 0xae, 0x97, 0xcc, 0x3b, 0x2f, 0x79, 0x9f,
+ 0xdf, 0x33, 0x21, 0xe3, 0xf6, 0x22, 0xa6, 0xc7, 0xd9, 0x80, 0x84, 0x92, 0xfb, 0x21, 0x9b, 0xb0,
+ 0x6c, 0xbf, 0x24, 0x71, 0xe4, 0xc7, 0x1d, 0xe5, 0xab, 0x09, 0xe3, 0xb6, 0xa0, 0x09, 0xb3, 0x0f,
+ 0xb8, 0xd2, 0x20, 0x14, 0x93, 0x42, 0xf9, 0xd3, 0x96, 0x1f, 0x81, 0x80, 0x94, 0x6a, 0x18, 0x92,
+ 0x24, 0x95, 0x5a, 0x7a, 0xdd, 0x03, 0x90, 0x6c, 0x48, 0xbb, 0x25, 0x89, 0x23, 0x12, 0x77, 0x14,
+ 0x31, 0x40, 0x5b, 0x18, 0x20, 0x79, 0x06, 0x24, 0xd3, 0x56, 0xf5, 0xe7, 0x99, 0x46, 0x1c, 0x34,
+ 0x7d, 0x45, 0xa4, 0xfa, 0xe5, 0x09, 0x27, 0x92, 0x91, 0xf4, 0x6d, 0x7b, 0x90, 0x8d, 0xec, 0xce,
+ 0x6e, 0x6c, 0xb5, 0x7d, 0xdd, 0x00, 0x09, 0x93, 0x86, 0xc9, 0x69, 0x38, 0x66, 0x02, 0xd2, 0x6b,
+ 0x9b, 0x98, 0x66, 0x42, 0x33, 0x0e, 0x47, 0xfc, 0xaf, 0x6f, 0x0d, 0xa8, 0x70, 0x0c, 0x9c, 0xbe,
+ 0x9c, 0x6b, 0xcc, 0x90, 0x5b, 0xf9, 0x9e, 0x29, 0x2d, 0x79, 0x1f, 0x94, 0xcc, 0xd2, 0x10, 0x7e,
+ 0xc0, 0x88, 0x09, 0xa6, 0x99, 0x14, 0x5e, 0xea, 0x96, 0xcc, 0x79, 0x86, 0x54, 0xd3, 0x0a, 0xaa,
+ 0xa3, 0x66, 0xb9, 0x1d, 0x90, 0x33, 0x3f, 0xa8, 0x99, 0x27, 0xd3, 0x16, 0xe9, 0x0d, 0xfe, 0x43,
+ 0xa8, 0xff, 0x80, 0xa6, 0x81, 0x37, 0x5f, 0xd5, 0x9c, 0x7c, 0x55, 0x73, 0x0f, 0xbd, 0xfe, 0x3e,
+ 0xa7, 0x31, 0x2b, 0xb8, 0x9f, 0x4f, 0x09, 0xfd, 0x66, 0x4a, 0x7b, 0xe2, 0x48, 0xea, 0xdb, 0x7b,
+ 0xa5, 0x0c, 0xcf, 0x2a, 0x7d, 0xda, 0x2a, 0x95, 0x76, 0x9d, 0x83, 0x90, 0x77, 0xe3, 0x16, 0x99,
+ 0x06, 0xae, 0x2a, 0x85, 0xfa, 0x87, 0x66, 0xb9, 0xfd, 0x97, 0x5c, 0xf8, 0x4b, 0x91, 0x53, 0xa7,
+ 0x0b, 0x3e, 0x6e, 0x2d, 0x8a, 0xbf, 0x4c, 0x5e, 0x7f, 0x13, 0x1b, 0xc0, 0x7c, 0x8d, 0x9d, 0xc5,
+ 0x1a, 0x3b, 0xcb, 0x35, 0x76, 0x6e, 0x73, 0x8c, 0xe6, 0x39, 0x46, 0x8b, 0x1c, 0xa3, 0x65, 0x8e,
+ 0xd1, 0x7d, 0x8e, 0xd1, 0xdd, 0x03, 0x76, 0xfe, 0x75, 0x2f, 0xbc, 0x39, 0x8f, 0x01, 0x00, 0x00,
+ 0xff, 0xff, 0xe9, 0x04, 0x05, 0xb8, 0x7b, 0x03, 0x00, 0x00,
+}
+
+func (m *CustomResourceDefinition) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CustomResourceDefinition) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CustomResourceDefinition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func (m *CustomResourceDefinitionList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CustomResourceDefinitionList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CustomResourceDefinitionList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *CustomResourceDefinition) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func (m *CustomResourceDefinitionList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *CustomResourceDefinition) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&CustomResourceDefinition{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *CustomResourceDefinitionList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]CustomResourceDefinition{"
+ for _, f := range this.Items {
+ repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CustomResourceDefinition", "CustomResourceDefinition", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&CustomResourceDefinitionList{`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `Items:` + repeatedStringForItems + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringGenerated(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *CustomResourceDefinition) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CustomResourceDefinition: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CustomResourceDefinition: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CustomResourceDefinitionList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CustomResourceDefinitionList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CustomResourceDefinitionList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, CustomResourceDefinition{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenerated
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/generated.proto b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/generated.proto
new file mode 100644
index 000000000..a78a19661
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/generated.proto
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = "proto2";
+
+package github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.apiextensions.v1;
+
+import "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1";
+
+// CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format
+// <.spec.name>.<.spec.group>.
+message CustomResourceDefinition {
+ // Standard object's metadata
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ObjectMeta metadata = 1;
+}
+
+// CustomResourceDefinitionList is a list of CustomResourceDefinition objects.
+message CustomResourceDefinitionList {
+ // Standard object's metadata
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ListMeta metadata = 1;
+
+ // items list individual CustomResourceDefinition objects
+ repeated CustomResourceDefinition items = 2;
+}
+
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/register.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/register.go
new file mode 100644
index 000000000..71fba678c
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/register.go
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Copyright 2017 The Kubernetes Authors.
+
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+const GroupName = "apiextensions.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns back a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ localSchemeBuilder = &SchemeBuilder
+ AddToScheme = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &CustomResourceDefinition{},
+ &CustomResourceDefinitionList{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/types.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/types.go
new file mode 100644
index 000000000..9be5bf3e2
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/types.go
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Copyright 2019 The Kubernetes Authors.
+
+package v1
+
+import (
+ metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format
+// <.spec.name>.<.spec.group>.
+type CustomResourceDefinition struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// CustomResourceDefinitionList is a list of CustomResourceDefinition objects.
+type CustomResourceDefinitionList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // Standard object's metadata
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // items list individual CustomResourceDefinition objects
+ Items []CustomResourceDefinition `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..0acce338e
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/apiextensions/v1/zz_generated.deepcopy.go
@@ -0,0 +1,72 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomResourceDefinition) DeepCopyInto(out *CustomResourceDefinition) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinition.
+func (in *CustomResourceDefinition) DeepCopy() *CustomResourceDefinition {
+ if in == nil {
+ return nil
+ }
+ out := new(CustomResourceDefinition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CustomResourceDefinition) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CustomResourceDefinitionList) DeepCopyInto(out *CustomResourceDefinitionList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]CustomResourceDefinition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitionList.
+func (in *CustomResourceDefinitionList) DeepCopy() *CustomResourceDefinitionList {
+ if in == nil {
+ return nil
+ }
+ out := new(CustomResourceDefinitionList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CustomResourceDefinitionList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels/selector.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels/selector.go
index 13358faed..0eac5f4be 100644
--- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels/selector.go
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels/selector.go
@@ -267,8 +267,8 @@ func (r *Requirement) Operator() selection.Operator {
}
// Values returns requirement values
-func (r *Requirement) Values() sets.String {
- ret := sets.String{}
+func (r *Requirement) Values() sets.Set[string] {
+ ret := sets.New[string]()
for i := range r.strValues {
ret.Insert(r.strValues[i])
}
@@ -686,7 +686,7 @@ func (p *Parser) parseRequirement() (*Requirement, error) {
if err != nil {
return nil, err
}
- var values sets.String
+ var values sets.Set[string]
switch operator {
case selection.In, selection.NotIn:
values, err = p.parseValues()
@@ -696,7 +696,7 @@ func (p *Parser) parseRequirement() (*Requirement, error) {
if err != nil {
return nil, err
}
- return NewRequirement(key, operator, values.List())
+ return NewRequirement(key, operator, sets.List(values))
}
@@ -752,7 +752,7 @@ func (p *Parser) parseOperator() (op selection.Operator, err error) {
}
// parseValues parses the values for set based matching (x,y,z)
-func (p *Parser) parseValues() (sets.String, error) {
+func (p *Parser) parseValues() (sets.Set[string], error) {
tok, lit := p.consume(Values)
if tok != OpenParToken {
return nil, fmt.Errorf("found '%s' expected: '('", lit)
@@ -770,7 +770,7 @@ func (p *Parser) parseValues() (sets.String, error) {
return s, nil
case ClosedParToken: // handles "()"
p.consume(Values)
- return sets.NewString(""), nil
+ return sets.New[string](""), nil
default:
return nil, fmt.Errorf("found '%s', expected: ',', ')' or identifier", lit)
}
@@ -778,8 +778,8 @@ func (p *Parser) parseValues() (sets.String, error) {
// parseIdentifiersList parses a (possibly empty) list of
// of comma separated (possibly empty) identifiers
-func (p *Parser) parseIdentifiersList() (sets.String, error) {
- s := sets.NewString()
+func (p *Parser) parseIdentifiersList() (sets.Set[string], error) {
+ s := sets.New[string]()
for {
tok, lit := p.consume(Values)
switch tok {
@@ -814,8 +814,8 @@ func (p *Parser) parseIdentifiersList() (sets.String, error) {
}
// parseExactValue parses the only value for exact match style
-func (p *Parser) parseExactValue() (sets.String, error) {
- s := sets.NewString()
+func (p *Parser) parseExactValue() (sets.Set[string], error) {
+ s := sets.New[string]()
tok, _ := p.lookahead(Values)
if tok == EndOfStringToken || tok == CommaToken {
s.Insert("")
@@ -908,7 +908,7 @@ func SelectorFromSet(ls Set) Selector {
// nil and empty Sets are considered equivalent to Everything().
// The Set is validated client-side, which allows to catch errors early.
func ValidatedSelectorFromSet(ls Set) (Selector, error) {
- if ls == nil || len(ls) == 0 {
+ if len(ls) == 0 {
return internalSelector{}, nil
}
requirements := make([]Requirement, 0, len(ls))
@@ -930,7 +930,7 @@ func ValidatedSelectorFromSet(ls Set) (Selector, error) {
// Note: this method copies the Set; if the Set is immutable, consider wrapping it with ValidatedSetSelector
// instead, which does not copy.
func SelectorFromValidatedSet(ls Set) Selector {
- if ls == nil || len(ls) == 0 {
+ if len(ls) == 0 {
return internalSelector{}
}
requirements := make([]Requirement, 0, len(ls))
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/validation/validation.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/validation/validation.go
new file mode 100644
index 000000000..07857fefa
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/validation/validation.go
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Copyright 2015 The Kubernetes Authors.
+
+package validation
+
+import (
+ "k8s.io/apimachinery/pkg/util/validation"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+
+ slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
+)
+
+// LabelSelectorValidationOptions is a struct that can be passed to ValidateLabelSelector to record the validate options
+type LabelSelectorValidationOptions struct {
+ // Allow invalid label value in selector
+ AllowInvalidLabelValueInSelector bool
+}
+
+// ValidateLabelSelector validate the LabelSelector according to the opts and returns any validation errors.
+// opts.AllowInvalidLabelValueInSelector is only expected to be set to true when required for backwards compatibility with existing invalid data.
+func ValidateLabelSelector(ps *slim_metav1.LabelSelector, opts LabelSelectorValidationOptions, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if ps == nil {
+ return allErrs
+ }
+ allErrs = append(allErrs, ValidateLabels(ps.MatchLabels, fldPath.Child("matchLabels"))...)
+ for i, expr := range ps.MatchExpressions {
+ allErrs = append(allErrs, ValidateLabelSelectorRequirement(expr, opts, fldPath.Child("matchExpressions").Index(i))...)
+ }
+ return allErrs
+}
+
+// ValidateLabelSelectorRequirement validate the requirement according to the opts and returns any validation errors.
+// opts.AllowInvalidLabelValueInSelector is only expected to be set to true when required for backwards compatibility with existing invalid data.
+func ValidateLabelSelectorRequirement(sr slim_metav1.LabelSelectorRequirement, opts LabelSelectorValidationOptions, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ switch sr.Operator {
+ case slim_metav1.LabelSelectorOpIn, slim_metav1.LabelSelectorOpNotIn:
+ if len(sr.Values) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified when `operator` is 'In' or 'NotIn'"))
+ }
+ case slim_metav1.LabelSelectorOpExists, slim_metav1.LabelSelectorOpDoesNotExist:
+ if len(sr.Values) > 0 {
+ allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'"))
+ }
+ default:
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), sr.Operator, "not a valid selector operator"))
+ }
+ allErrs = append(allErrs, ValidateLabelName(sr.Key, fldPath.Child("key"))...)
+ if !opts.AllowInvalidLabelValueInSelector {
+ for valueIndex, value := range sr.Values {
+ for _, msg := range validation.IsValidLabelValue(value) {
+ allErrs = append(allErrs, field.Invalid(fldPath.Child("values").Index(valueIndex), value, msg))
+ }
+ }
+ }
+ return allErrs
+}
+
+// ValidateLabelName validates that the label name is correctly defined.
+func ValidateLabelName(labelName string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ for _, msg := range validation.IsQualifiedName(labelName) {
+ allErrs = append(allErrs, field.Invalid(fldPath, labelName, msg))
+ }
+ return allErrs
+}
+
+// ValidateLabels validates that a set of labels are correctly defined.
+func ValidateLabels(labels map[string]string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ for k, v := range labels {
+ allErrs = append(allErrs, ValidateLabelName(k, fldPath)...)
+ for _, msg := range validation.IsValidLabelValue(v) {
+ allErrs = append(allErrs, field.Invalid(fldPath, v, msg))
+ }
+ }
+ return allErrs
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/doc.go
new file mode 100644
index 000000000..b55d23db5
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/doc.go
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Copyright 2017 The Kubernetes Authors.
+
+// +k8s:deepcopy-gen=package
+// +k8s:openapi-gen=true
+// +k8s:defaulter-gen=TypeMeta
+
+// +groupName=meta
+
+package v1beta1
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/generated.pb.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/generated.pb.go
new file mode 100644
index 000000000..b419723c8
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/generated.pb.go
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/generated.proto
+
+package v1beta1
+
+import (
+ fmt "fmt"
+
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
+
+ io "io"
+
+ proto "github.com/gogo/protobuf/proto"
+
+ math "math"
+ math_bits "math/bits"
+ reflect "reflect"
+ strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} }
+func (*PartialObjectMetadataList) ProtoMessage() {}
+func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) {
+ return fileDescriptor_1a84ae209524fd15, []int{0}
+}
+func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *PartialObjectMetadataList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *PartialObjectMetadataList) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PartialObjectMetadataList.Merge(m, src)
+}
+func (m *PartialObjectMetadataList) XXX_Size() int {
+ return m.Size()
+}
+func (m *PartialObjectMetadataList) XXX_DiscardUnknown() {
+ xxx_messageInfo_PartialObjectMetadataList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PartialObjectMetadataList proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*PartialObjectMetadataList)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1beta1.PartialObjectMetadataList")
+}
+
+func init() {
+ proto.RegisterFile("github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/generated.proto", fileDescriptor_1a84ae209524fd15)
+}
+
+var fileDescriptor_1a84ae209524fd15 = []byte{
+ // 322 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x92, 0x3f, 0x4f, 0x02, 0x31,
+ 0x18, 0x87, 0xaf, 0x1a, 0x12, 0x72, 0xc4, 0xc4, 0x30, 0x21, 0x43, 0x21, 0x4e, 0x2c, 0xb6, 0x81,
+ 0xc1, 0x90, 0xb0, 0x18, 0x06, 0x13, 0xa3, 0x44, 0xc3, 0xe8, 0xf6, 0xde, 0x51, 0x8f, 0x7a, 0xf4,
+ 0xee, 0x72, 0x7d, 0xcf, 0xc4, 0xcd, 0x8f, 0xe0, 0xc7, 0x62, 0x64, 0x64, 0x22, 0x52, 0x3f, 0x88,
+ 0xa6, 0xe5, 0x40, 0x43, 0x9c, 0x70, 0x7a, 0xff, 0xa4, 0x79, 0x9e, 0x5f, 0x9b, 0xfa, 0xb7, 0x91,
+ 0xc4, 0x69, 0x11, 0xb0, 0x30, 0x55, 0x3c, 0x94, 0x33, 0x59, 0xec, 0x4a, 0x16, 0x47, 0x3c, 0xee,
+ 0x6b, 0xae, 0x67, 0x52, 0xb9, 0x06, 0x32, 0xa9, 0xb9, 0x12, 0x08, 0xfc, 0xa5, 0x1b, 0x08, 0x84,
+ 0x2e, 0x8f, 0x44, 0x22, 0x72, 0x40, 0x31, 0x61, 0x59, 0x9e, 0x62, 0x5a, 0x1f, 0xfc, 0xc0, 0xd8,
+ 0x86, 0xb2, 0x2d, 0x59, 0x1c, 0xb1, 0xb8, 0xaf, 0x99, 0x85, 0xb9, 0xc6, 0xc2, 0x98, 0x85, 0xb1,
+ 0x12, 0xd6, 0xbc, 0x3e, 0x2c, 0xc9, 0x7e, 0x88, 0xe6, 0xc5, 0x2f, 0x4e, 0x94, 0x46, 0x29, 0x77,
+ 0xeb, 0xa0, 0x78, 0x72, 0x93, 0x1b, 0x5c, 0x57, 0x1e, 0xbf, 0xb4, 0x69, 0x64, 0x6a, 0x99, 0x0a,
+ 0xc2, 0xa9, 0x4c, 0x44, 0xfe, 0xea, 0x8c, 0x79, 0x91, 0xa0, 0x54, 0x82, 0xeb, 0x70, 0x2a, 0x14,
+ 0xec, 0x6b, 0xce, 0xbf, 0x88, 0x7f, 0xf6, 0x00, 0x39, 0x4a, 0x98, 0xdd, 0x07, 0xcf, 0x22, 0xc4,
+ 0x91, 0x40, 0x98, 0x00, 0xc2, 0x9d, 0xd4, 0x58, 0x4f, 0xfc, 0xaa, 0x2a, 0xe7, 0xc6, 0x51, 0x9b,
+ 0x74, 0x6a, 0xbd, 0x2b, 0x76, 0xd8, 0xe3, 0x30, 0xcb, 0xb3, 0xec, 0xe1, 0xe9, 0x7c, 0xd5, 0xf2,
+ 0xcc, 0xaa, 0x55, 0xdd, 0x6e, 0xc6, 0x3b, 0x47, 0x3d, 0xf7, 0x2b, 0x12, 0x85, 0xd2, 0x0d, 0xd2,
+ 0x3e, 0xee, 0xd4, 0x7a, 0xa3, 0x43, 0x65, 0x7f, 0xde, 0x68, 0x78, 0x52, 0x9a, 0x2b, 0x37, 0xd6,
+ 0x31, 0xde, 0xa8, 0x86, 0x30, 0x5f, 0x53, 0x6f, 0xb1, 0xa6, 0xde, 0x72, 0x4d, 0xbd, 0x37, 0x43,
+ 0xc9, 0xdc, 0x50, 0xb2, 0x30, 0x94, 0x2c, 0x0d, 0x25, 0x1f, 0x86, 0x92, 0xf7, 0x4f, 0xea, 0x3d,
+ 0x0e, 0xfe, 0xf1, 0xc1, 0xbe, 0x03, 0x00, 0x00, 0xff, 0xff, 0x8e, 0xe4, 0x91, 0x70, 0x9e, 0x02,
+ 0x00, 0x00,
+}
+
+func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PartialObjectMetadataList) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PartialObjectMetadataList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ if len(m.Items) > 0 {
+ for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *PartialObjectMetadataList) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Items) > 0 {
+ for _, e := range m.Items {
+ l = e.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ l = m.ListMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *PartialObjectMetadataList) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForItems := "[]PartialObjectMetadata{"
+ for _, f := range this.Items {
+ repeatedStringForItems += fmt.Sprintf("%v", f) + ","
+ }
+ repeatedStringForItems += "}"
+ s := strings.Join([]string{`&PartialObjectMetadataList{`,
+ `Items:` + repeatedStringForItems + `,`,
+ `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringGenerated(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PartialObjectMetadataList: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PartialObjectMetadataList: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Items = append(m.Items, v1.PartialObjectMetadata{})
+ if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenerated
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/generated.proto b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/generated.proto
new file mode 100644
index 000000000..be61fe3a0
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/generated.proto
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = "proto2";
+
+package github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1beta1;
+
+import "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1";
+
+// PartialObjectMetadataList contains a list of objects containing only their metadata.
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+message PartialObjectMetadataList {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ // +optional
+ optional github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ListMeta metadata = 2;
+
+ // items contains each of the included items.
+ repeated github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.PartialObjectMetadata items = 1;
+}
+
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/register.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/register.go
new file mode 100644
index 000000000..5d62e0655
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/register.go
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Copyright 2017 The Kubernetes Authors.
+
+package v1beta1
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name for this API.
+const GroupName = "meta.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+// Kind takes an unqualified kind and returns a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// AddMetaToScheme registers base meta types into schemas.
+func AddMetaToScheme(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &PartialObjectMetadata{},
+ &PartialObjectMetadataList{},
+ )
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/types.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/types.go
new file mode 100644
index 000000000..aa1a77b82
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/types.go
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Copyright 2017 The Kubernetes Authors.
+
+// package v1beta1 is alpha objects from meta that will be introduced.
+package v1beta1
+
+import (
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
+)
+
+// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients
+// to get access to a particular ObjectMeta schema without knowing the details of the version.
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type PartialObjectMetadata = v1.PartialObjectMetadata
+
+// IMPORTANT: PartialObjectMetadataList has different protobuf field ids in v1beta1 than
+// v1 because ListMeta was accidentally omitted prior to 1.15. Therefore this type must
+// remain independent of v1.PartialObjectMetadataList to preserve mappings.
+
+// PartialObjectMetadataList contains a list of objects containing only their metadata.
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type PartialObjectMetadataList struct {
+ v1.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ // +optional
+ v1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,2,opt,name=metadata"`
+
+ // items contains each of the included items.
+ Items []v1.PartialObjectMetadata `json:"items" protobuf:"bytes,1,rep,name=items"`
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..d3b949f0d
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,47 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PartialObjectMetadataList) DeepCopyInto(out *PartialObjectMetadataList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]v1.PartialObjectMetadata, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadataList.
+func (in *PartialObjectMetadataList) DeepCopy() *PartialObjectMetadataList {
+ if in == nil {
+ return nil
+ }
+ out := new(PartialObjectMetadataList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PartialObjectMetadataList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/doc.go
new file mode 100644
index 000000000..3955e271b
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/doc.go
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// +deepequal-gen=package
+
+// Package types contains slimmer versions of k8s types.
+// +groupName=util
+package intstr
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/generated.pb.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/generated.pb.go
new file mode 100644
index 000000000..376a6c0f8
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/generated.pb.go
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/generated.proto
+
+package intstr
+
+import (
+ fmt "fmt"
+
+ io "io"
+ math "math"
+ math_bits "math/bits"
+
+ proto "github.com/gogo/protobuf/proto"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *IntOrString) Reset() { *m = IntOrString{} }
+func (*IntOrString) ProtoMessage() {}
+func (*IntOrString) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8984be45904ea297, []int{0}
+}
+func (m *IntOrString) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *IntOrString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *IntOrString) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_IntOrString.Merge(m, src)
+}
+func (m *IntOrString) XXX_Size() int {
+ return m.Size()
+}
+func (m *IntOrString) XXX_DiscardUnknown() {
+ xxx_messageInfo_IntOrString.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IntOrString proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*IntOrString)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.util.intstr.IntOrString")
+}
+
+func init() {
+ proto.RegisterFile("github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/generated.proto", fileDescriptor_8984be45904ea297)
+}
+
+var fileDescriptor_8984be45904ea297 = []byte{
+ // 293 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xf2, 0x4a, 0xcf, 0x2c, 0xc9,
+ 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcc, 0xc9, 0x2c, 0x85, 0x53, 0x05, 0xd9,
+ 0xe9, 0xfa, 0xd9, 0x16, 0xc5, 0xfa, 0xc5, 0x39, 0x99, 0xb9, 0x60, 0x46, 0x62, 0x41, 0x66, 0xb1,
+ 0x7e, 0x69, 0x49, 0x66, 0x8e, 0x7e, 0x66, 0x5e, 0x49, 0x71, 0x49, 0x91, 0x7e, 0x7a, 0x6a, 0x5e,
+ 0x6a, 0x51, 0x62, 0x49, 0x6a, 0x8a, 0x5e, 0x41, 0x51, 0x7e, 0x49, 0xbe, 0x90, 0x15, 0xc2, 0x2c,
+ 0x3d, 0x88, 0x21, 0x30, 0xaa, 0x20, 0x3b, 0x5d, 0x2f, 0xdb, 0xa2, 0x58, 0x0f, 0x64, 0x16, 0x98,
+ 0x01, 0x32, 0x4b, 0x0f, 0x64, 0x96, 0x1e, 0xc4, 0x2c, 0x29, 0x5d, 0x24, 0x77, 0xa4, 0xe7, 0xa7,
+ 0xe7, 0xeb, 0x83, 0x8d, 0x4c, 0x2a, 0x4d, 0x03, 0xf3, 0xc0, 0x1c, 0x30, 0x0b, 0x62, 0x95, 0xd2,
+ 0x44, 0x46, 0x2e, 0x6e, 0xcf, 0xbc, 0x12, 0xff, 0xa2, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x21,
+ 0x0d, 0x2e, 0x96, 0x92, 0xca, 0x82, 0x54, 0x09, 0x46, 0x05, 0x46, 0x0d, 0x66, 0x27, 0x91, 0x13,
+ 0xf7, 0xe4, 0x19, 0x1e, 0xdd, 0x93, 0x67, 0x09, 0xa9, 0x2c, 0x48, 0xfd, 0x05, 0xa5, 0x83, 0xc0,
+ 0x2a, 0x84, 0xd4, 0xb8, 0xd8, 0x32, 0xf3, 0x4a, 0xc2, 0x12, 0x73, 0x24, 0x98, 0x14, 0x18, 0x35,
+ 0x58, 0x9d, 0xf8, 0xa0, 0x6a, 0xd9, 0x3c, 0xc1, 0xa2, 0x41, 0x50, 0x59, 0x90, 0xba, 0xe2, 0x92,
+ 0x22, 0x90, 0x3a, 0x66, 0x05, 0x46, 0x0d, 0x4e, 0x84, 0xba, 0x60, 0xb0, 0x68, 0x10, 0x54, 0xd6,
+ 0x8a, 0x63, 0xc6, 0x02, 0x79, 0x86, 0x86, 0x3b, 0x0a, 0x0c, 0x4e, 0x09, 0x27, 0x1e, 0xca, 0x31,
+ 0x5c, 0x78, 0x28, 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0x43, 0xc3, 0x23, 0x39, 0xc6, 0x13, 0x8f, 0xe4,
+ 0x18, 0x2f, 0x3c, 0x92, 0x63, 0xbc, 0xf1, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x09, 0x8f,
+ 0xe5, 0x18, 0xa2, 0xac, 0xc8, 0x0f, 0x70, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x79, 0x17, 0x78,
+ 0x08, 0xad, 0x01, 0x00, 0x00,
+}
+
+func (m *IntOrString) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *IntOrString) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *IntOrString) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i -= len(m.StrVal)
+ copy(dAtA[i:], m.StrVal)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.StrVal)))
+ i--
+ dAtA[i] = 0x1a
+ i = encodeVarintGenerated(dAtA, i, uint64(m.IntVal))
+ i--
+ dAtA[i] = 0x10
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Type))
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+ offset -= sovGenerated(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *IntOrString) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovGenerated(uint64(m.Type))
+ n += 1 + sovGenerated(uint64(m.IntVal))
+ l = len(m.StrVal)
+ n += 1 + l + sovGenerated(uint64(l))
+ return n
+}
+
+func sovGenerated(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+ return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *IntOrString) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: IntOrString: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: IntOrString: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ m.Type = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Type |= Type(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IntVal", wireType)
+ }
+ m.IntVal = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.IntVal |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StrVal", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.StrVal = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupGenerated
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthGenerated
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/generated.proto b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/generated.proto
new file mode 100644
index 000000000..2d929fd18
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/generated.proto
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = "proto2";
+
+package github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.util.intstr;
+
+// Package-wide variables from generator "generated".
+option go_package = "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr";
+
+// IntOrString is a type that can hold an int32 or a string. When used in
+// JSON or YAML marshalling and unmarshalling, it produces or consumes the
+// inner type. This allows you to have, for example, a JSON field that can
+// accept a name or number.
+// TODO: Rename to Int32OrString
+//
+// +protobuf=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+// +k8s:openapi-gen=true
+message IntOrString {
+ optional int64 type = 1;
+
+ optional int32 intVal = 2;
+
+ optional string strVal = 3;
+}
+
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/intstr.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/intstr.go
new file mode 100644
index 000000000..529d5a903
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/intstr.go
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Copyright 2014 The Kubernetes Authors.
+
+package intstr
+
+import (
+ "encoding/json"
+ "fmt"
+ "strconv"
+)
+
+// IntOrString is a type that can hold an int32 or a string. When used in
+// JSON or YAML marshalling and unmarshalling, it produces or consumes the
+// inner type. This allows you to have, for example, a JSON field that can
+// accept a name or number.
+// TODO: Rename to Int32OrString
+//
+// +protobuf=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+// +k8s:openapi-gen=true
+type IntOrString struct {
+ Type Type `protobuf:"varint,1,opt,name=type,casttype=Type"`
+ IntVal int32 `protobuf:"varint,2,opt,name=intVal"`
+ StrVal string `protobuf:"bytes,3,opt,name=strVal"`
+}
+
+// Type represents the stored type of IntOrString.
+type Type int64
+
+const (
+ Int Type = iota // The IntOrString holds an int.
+ String // The IntOrString holds a string.
+)
+
+// UnmarshalJSON implements the json.Unmarshaller interface.
+func (intstr *IntOrString) UnmarshalJSON(value []byte) error {
+ if value[0] == '"' {
+ intstr.Type = String
+ return json.Unmarshal(value, &intstr.StrVal)
+ }
+ intstr.Type = Int
+ return json.Unmarshal(value, &intstr.IntVal)
+}
+
+// String returns the string value, or the Itoa of the int value.
+func (intstr *IntOrString) String() string {
+ if intstr == nil {
+ return ""
+ }
+ if intstr.Type == String {
+ return intstr.StrVal
+ }
+ return strconv.Itoa(intstr.IntValue())
+}
+
+// IntValue returns the IntVal if type Int, or if
+// it is a String, will attempt a conversion to int,
+// returning 0 if a parsing error occurs.
+func (intstr *IntOrString) IntValue() int {
+ if intstr.Type == String {
+ i, _ := strconv.Atoi(intstr.StrVal)
+ return i
+ }
+ return int(intstr.IntVal)
+}
+
+// MarshalJSON implements the json.Marshaller interface.
+func (intstr IntOrString) MarshalJSON() ([]byte, error) {
+ switch intstr.Type {
+ case Int:
+ return json.Marshal(intstr.IntVal)
+ case String:
+ return json.Marshal(intstr.StrVal)
+ default:
+ return []byte{}, fmt.Errorf("impossible IntOrString.Type")
+ }
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/zz_generated.deepequal.go
new file mode 100644
index 000000000..c9cb4ad58
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/zz_generated.deepequal.go
@@ -0,0 +1,29 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by deepequal-gen. DO NOT EDIT.
+
+package intstr
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *IntOrString) DeepEqual(other *IntOrString) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Type != other.Type {
+ return false
+ }
+ if in.IntVal != other.IntVal {
+ return false
+ }
+ if in.StrVal != other.StrVal {
+ return false
+ }
+
+ return true
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/clientset.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/clientset.go
new file mode 100644
index 000000000..516fa061d
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/clientset.go
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package versioned
+
+import (
+ "fmt"
+ "net/http"
+
+ corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1"
+ discoveryv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1"
+ discoveryv1beta1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1"
+ networkingv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1"
+ discovery "k8s.io/client-go/discovery"
+ rest "k8s.io/client-go/rest"
+ flowcontrol "k8s.io/client-go/util/flowcontrol"
+)
+
+type Interface interface {
+ Discovery() discovery.DiscoveryInterface
+ CoreV1() corev1.CoreV1Interface
+ DiscoveryV1beta1() discoveryv1beta1.DiscoveryV1beta1Interface
+ DiscoveryV1() discoveryv1.DiscoveryV1Interface
+ NetworkingV1() networkingv1.NetworkingV1Interface
+}
+
+// Clientset contains the clients for groups.
+type Clientset struct {
+ *discovery.DiscoveryClient
+ coreV1 *corev1.CoreV1Client
+ discoveryV1beta1 *discoveryv1beta1.DiscoveryV1beta1Client
+ discoveryV1 *discoveryv1.DiscoveryV1Client
+ networkingV1 *networkingv1.NetworkingV1Client
+}
+
+// CoreV1 retrieves the CoreV1Client
+func (c *Clientset) CoreV1() corev1.CoreV1Interface {
+ return c.coreV1
+}
+
+// DiscoveryV1beta1 retrieves the DiscoveryV1beta1Client
+func (c *Clientset) DiscoveryV1beta1() discoveryv1beta1.DiscoveryV1beta1Interface {
+ return c.discoveryV1beta1
+}
+
+// DiscoveryV1 retrieves the DiscoveryV1Client
+func (c *Clientset) DiscoveryV1() discoveryv1.DiscoveryV1Interface {
+ return c.discoveryV1
+}
+
+// NetworkingV1 retrieves the NetworkingV1Client
+func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface {
+ return c.networkingV1
+}
+
+// Discovery retrieves the DiscoveryClient
+func (c *Clientset) Discovery() discovery.DiscoveryInterface {
+ if c == nil {
+ return nil
+ }
+ return c.DiscoveryClient
+}
+
+// NewForConfig creates a new Clientset for the given config.
+// If config's RateLimiter is not set and QPS and Burst are acceptable,
+// NewForConfig will generate a rate-limiter in configShallowCopy.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
+func NewForConfig(c *rest.Config) (*Clientset, error) {
+ configShallowCopy := *c
+
+ if configShallowCopy.UserAgent == "" {
+ configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ // share the transport between all clients
+ httpClient, err := rest.HTTPClientFor(&configShallowCopy)
+ if err != nil {
+ return nil, err
+ }
+
+ return NewForConfigAndClient(&configShallowCopy, httpClient)
+}
+
+// NewForConfigAndClient creates a new Clientset for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+// If config's RateLimiter is not set and QPS and Burst are acceptable,
+// NewForConfigAndClient will generate a rate-limiter in configShallowCopy.
+func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) {
+ configShallowCopy := *c
+ if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
+ if configShallowCopy.Burst <= 0 {
+ return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0")
+ }
+ configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
+ }
+
+ var cs Clientset
+ var err error
+ cs.coreV1, err = corev1.NewForConfigAndClient(&configShallowCopy, httpClient)
+ if err != nil {
+ return nil, err
+ }
+ cs.discoveryV1beta1, err = discoveryv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient)
+ if err != nil {
+ return nil, err
+ }
+ cs.discoveryV1, err = discoveryv1.NewForConfigAndClient(&configShallowCopy, httpClient)
+ if err != nil {
+ return nil, err
+ }
+ cs.networkingV1, err = networkingv1.NewForConfigAndClient(&configShallowCopy, httpClient)
+ if err != nil {
+ return nil, err
+ }
+
+ cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient)
+ if err != nil {
+ return nil, err
+ }
+ return &cs, nil
+}
+
+// NewForConfigOrDie creates a new Clientset for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *Clientset {
+ cs, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return cs
+}
+
+// New creates a new Clientset for the given RESTClient.
+func New(c rest.Interface) *Clientset {
+ var cs Clientset
+ cs.coreV1 = corev1.New(c)
+ cs.discoveryV1beta1 = discoveryv1beta1.New(c)
+ cs.discoveryV1 = discoveryv1.New(c)
+ cs.networkingV1 = networkingv1.New(c)
+
+ cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
+ return &cs
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/fake/clientset_generated.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/fake/clientset_generated.go
new file mode 100644
index 000000000..03fbe929f
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/fake/clientset_generated.go
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ clientset "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned"
+ corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1"
+ fakecorev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake"
+ discoveryv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1"
+ fakediscoveryv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/fake"
+ discoveryv1beta1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1"
+ fakediscoveryv1beta1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/fake"
+ networkingv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1"
+ fakenetworkingv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/fake"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/watch"
+ "k8s.io/client-go/discovery"
+ fakediscovery "k8s.io/client-go/discovery/fake"
+ "k8s.io/client-go/testing"
+)
+
+// NewSimpleClientset returns a clientset that will respond with the provided objects.
+// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
+// without applying any validations and/or defaults. It shouldn't be considered a replacement
+// for a real clientset and is mostly useful in simple unit tests.
+func NewSimpleClientset(objects ...runtime.Object) *Clientset {
+ o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder())
+ for _, obj := range objects {
+ if err := o.Add(obj); err != nil {
+ panic(err)
+ }
+ }
+
+ cs := &Clientset{tracker: o}
+ cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake}
+ cs.AddReactor("*", "*", testing.ObjectReaction(o))
+ cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) {
+ gvr := action.GetResource()
+ ns := action.GetNamespace()
+ watch, err := o.Watch(gvr, ns)
+ if err != nil {
+ return false, nil, err
+ }
+ return true, watch, nil
+ })
+
+ return cs
+}
+
+// Clientset implements clientset.Interface. Meant to be embedded into a
+// struct to get a default implementation. This makes faking out just the method
+// you want to test easier.
+type Clientset struct {
+ testing.Fake
+ discovery *fakediscovery.FakeDiscovery
+ tracker testing.ObjectTracker
+}
+
+func (c *Clientset) Discovery() discovery.DiscoveryInterface {
+ return c.discovery
+}
+
+func (c *Clientset) Tracker() testing.ObjectTracker {
+ return c.tracker
+}
+
+var (
+ _ clientset.Interface = &Clientset{}
+ _ testing.FakeClient = &Clientset{}
+)
+
+// CoreV1 retrieves the CoreV1Client
+func (c *Clientset) CoreV1() corev1.CoreV1Interface {
+ return &fakecorev1.FakeCoreV1{Fake: &c.Fake}
+}
+
+// DiscoveryV1beta1 retrieves the DiscoveryV1beta1Client
+func (c *Clientset) DiscoveryV1beta1() discoveryv1beta1.DiscoveryV1beta1Interface {
+ return &fakediscoveryv1beta1.FakeDiscoveryV1beta1{Fake: &c.Fake}
+}
+
+// DiscoveryV1 retrieves the DiscoveryV1Client
+func (c *Clientset) DiscoveryV1() discoveryv1.DiscoveryV1Interface {
+ return &fakediscoveryv1.FakeDiscoveryV1{Fake: &c.Fake}
+}
+
+// NetworkingV1 retrieves the NetworkingV1Client
+func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface {
+ return &fakenetworkingv1.FakeNetworkingV1{Fake: &c.Fake}
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/fake/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/fake/doc.go
new file mode 100644
index 000000000..1da8cc9d5
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/fake/doc.go
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated fake clientset.
+package fake
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/fake/register.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/fake/register.go
new file mode 100644
index 000000000..8053061f4
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/fake/register.go
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
+ discoveryv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1"
+ discoveryv1beta1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1"
+ networkingv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+var scheme = runtime.NewScheme()
+var codecs = serializer.NewCodecFactory(scheme)
+
+var localSchemeBuilder = runtime.SchemeBuilder{
+ corev1.AddToScheme,
+ discoveryv1beta1.AddToScheme,
+ discoveryv1.AddToScheme,
+ networkingv1.AddToScheme,
+}
+
+// AddToScheme adds all types of this clientset into the given scheme. This allows composition
+// of clientsets, like in:
+//
+// import (
+// "k8s.io/client-go/kubernetes"
+// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
+// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+// )
+//
+// kclientset, _ := kubernetes.NewForConfig(c)
+// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+//
+// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
+// correctly.
+var AddToScheme = localSchemeBuilder.AddToScheme
+
+func init() {
+ v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
+ utilruntime.Must(AddToScheme(scheme))
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme/doc.go
new file mode 100644
index 000000000..ba3451535
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme/doc.go
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package contains the scheme of the automatically generated clientset.
+package scheme
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme/register.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme/register.go
new file mode 100644
index 000000000..dd5b051f7
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme/register.go
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package scheme
+
+import (
+ corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
+ discoveryv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1"
+ discoveryv1beta1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1"
+ networkingv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+var Scheme = runtime.NewScheme()
+var Codecs = serializer.NewCodecFactory(Scheme)
+var ParameterCodec = runtime.NewParameterCodec(Scheme)
+var localSchemeBuilder = runtime.SchemeBuilder{
+ corev1.AddToScheme,
+ discoveryv1beta1.AddToScheme,
+ discoveryv1.AddToScheme,
+ networkingv1.AddToScheme,
+}
+
+// AddToScheme adds all types of this clientset into the given scheme. This allows composition
+// of clientsets, like in:
+//
+// import (
+// "k8s.io/client-go/kubernetes"
+// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
+// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+// )
+//
+// kclientset, _ := kubernetes.NewForConfig(c)
+// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+//
+// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
+// correctly.
+var AddToScheme = localSchemeBuilder.AddToScheme
+
+func init() {
+ v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
+ utilruntime.Must(AddToScheme(Scheme))
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/core_client.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/core_client.go
new file mode 100644
index 000000000..25eab1958
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/core_client.go
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "net/http"
+
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
+ "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme"
+ rest "k8s.io/client-go/rest"
+)
+
+type CoreV1Interface interface {
+ RESTClient() rest.Interface
+ EndpointsGetter
+ NamespacesGetter
+ NodesGetter
+ PodsGetter
+ SecretsGetter
+ ServicesGetter
+}
+
+// CoreV1Client is used to interact with features provided by the group.
+type CoreV1Client struct {
+ restClient rest.Interface
+}
+
+func (c *CoreV1Client) Endpoints(namespace string) EndpointsInterface {
+ return newEndpoints(c, namespace)
+}
+
+func (c *CoreV1Client) Namespaces() NamespaceInterface {
+ return newNamespaces(c)
+}
+
+func (c *CoreV1Client) Nodes() NodeInterface {
+ return newNodes(c)
+}
+
+func (c *CoreV1Client) Pods(namespace string) PodInterface {
+ return newPods(c, namespace)
+}
+
+func (c *CoreV1Client) Secrets(namespace string) SecretInterface {
+ return newSecrets(c, namespace)
+}
+
+func (c *CoreV1Client) Services(namespace string) ServiceInterface {
+ return newServices(c, namespace)
+}
+
+// NewForConfig creates a new CoreV1Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
+func NewForConfig(c *rest.Config) (*CoreV1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ httpClient, err := rest.HTTPClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new CoreV1Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CoreV1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := rest.RESTClientForConfigAndClient(&config, h)
+ if err != nil {
+ return nil, err
+ }
+ return &CoreV1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new CoreV1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *CoreV1Client {
+ client, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+// New creates a new CoreV1Client for the given RESTClient.
+func New(c rest.Interface) *CoreV1Client {
+ return &CoreV1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+ gv := v1.SchemeGroupVersion
+ config.GroupVersion = &gv
+ config.APIPath = "/api"
+ config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *CoreV1Client) RESTClient() rest.Interface {
+ if c == nil {
+ return nil
+ }
+ return c.restClient
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/doc.go
new file mode 100644
index 000000000..50cfbd485
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/doc.go
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/endpoints.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/endpoints.go
new file mode 100644
index 000000000..9f363250e
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/endpoints.go
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ "time"
+
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
+ scheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// EndpointsGetter has a method to return a EndpointsInterface.
+// A group's client should implement this interface.
+type EndpointsGetter interface {
+ Endpoints(namespace string) EndpointsInterface
+}
+
+// EndpointsInterface has methods to work with Endpoints resources.
+type EndpointsInterface interface {
+ Create(ctx context.Context, endpoints *v1.Endpoints, opts metav1.CreateOptions) (*v1.Endpoints, error)
+ Update(ctx context.Context, endpoints *v1.Endpoints, opts metav1.UpdateOptions) (*v1.Endpoints, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Endpoints, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.EndpointsList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Endpoints, err error)
+ EndpointsExpansion
+}
+
+// endpoints implements EndpointsInterface
+type endpoints struct {
+ client rest.Interface
+ ns string
+}
+
+// newEndpoints returns a Endpoints
+func newEndpoints(c *CoreV1Client, namespace string) *endpoints {
+ return &endpoints{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the endpoints, and returns the corresponding endpoints object, and an error if there is any.
+func (c *endpoints) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Endpoints, err error) {
+ result = &v1.Endpoints{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("endpoints").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Endpoints that match those selectors.
+func (c *endpoints) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EndpointsList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.EndpointsList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("endpoints").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested endpoints.
+func (c *endpoints) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("endpoints").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a endpoints and creates it. Returns the server's representation of the endpoints, and an error, if there is any.
+func (c *endpoints) Create(ctx context.Context, endpoints *v1.Endpoints, opts metav1.CreateOptions) (result *v1.Endpoints, err error) {
+ result = &v1.Endpoints{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("endpoints").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(endpoints).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a endpoints and updates it. Returns the server's representation of the endpoints, and an error, if there is any.
+func (c *endpoints) Update(ctx context.Context, endpoints *v1.Endpoints, opts metav1.UpdateOptions) (result *v1.Endpoints, err error) {
+ result = &v1.Endpoints{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("endpoints").
+ Name(endpoints.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(endpoints).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the endpoints and deletes it. Returns an error if one occurs.
+func (c *endpoints) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("endpoints").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *endpoints) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("endpoints").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched endpoints.
+func (c *endpoints) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Endpoints, err error) {
+ result = &v1.Endpoints{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("endpoints").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/doc.go
new file mode 100644
index 000000000..57bd090ef
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/doc.go
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_core_client.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_core_client.go
new file mode 100644
index 000000000..60176f481
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_core_client.go
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeCoreV1 struct {
+ *testing.Fake
+}
+
+func (c *FakeCoreV1) Endpoints(namespace string) v1.EndpointsInterface {
+ return &FakeEndpoints{c, namespace}
+}
+
+func (c *FakeCoreV1) Namespaces() v1.NamespaceInterface {
+ return &FakeNamespaces{c}
+}
+
+func (c *FakeCoreV1) Nodes() v1.NodeInterface {
+ return &FakeNodes{c}
+}
+
+func (c *FakeCoreV1) Pods(namespace string) v1.PodInterface {
+ return &FakePods{c, namespace}
+}
+
+func (c *FakeCoreV1) Secrets(namespace string) v1.SecretInterface {
+ return &FakeSecrets{c, namespace}
+}
+
+func (c *FakeCoreV1) Services(namespace string) v1.ServiceInterface {
+ return &FakeServices{c, namespace}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeCoreV1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_endpoints.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_endpoints.go
new file mode 100644
index 000000000..9029e198c
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_endpoints.go
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeEndpoints implements EndpointsInterface
+type FakeEndpoints struct {
+ Fake *FakeCoreV1
+ ns string
+}
+
+var endpointsResource = v1.SchemeGroupVersion.WithResource("endpoints")
+
+var endpointsKind = v1.SchemeGroupVersion.WithKind("Endpoints")
+
+// Get takes name of the endpoints, and returns the corresponding endpoints object, and an error if there is any.
+func (c *FakeEndpoints) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Endpoints, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(endpointsResource, c.ns, name), &v1.Endpoints{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.Endpoints), err
+}
+
+// List takes label and field selectors, and returns the list of Endpoints that match those selectors.
+func (c *FakeEndpoints) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EndpointsList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(endpointsResource, endpointsKind, c.ns, opts), &v1.EndpointsList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1.EndpointsList{ListMeta: obj.(*v1.EndpointsList).ListMeta}
+ for _, item := range obj.(*v1.EndpointsList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested endpoints.
+func (c *FakeEndpoints) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(endpointsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a endpoints and creates it. Returns the server's representation of the endpoints, and an error, if there is any.
+func (c *FakeEndpoints) Create(ctx context.Context, endpoints *v1.Endpoints, opts metav1.CreateOptions) (result *v1.Endpoints, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(endpointsResource, c.ns, endpoints), &v1.Endpoints{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.Endpoints), err
+}
+
+// Update takes the representation of a endpoints and updates it. Returns the server's representation of the endpoints, and an error, if there is any.
+func (c *FakeEndpoints) Update(ctx context.Context, endpoints *v1.Endpoints, opts metav1.UpdateOptions) (result *v1.Endpoints, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(endpointsResource, c.ns, endpoints), &v1.Endpoints{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.Endpoints), err
+}
+
+// Delete takes name of the endpoints and deletes it. Returns an error if one occurs.
+func (c *FakeEndpoints) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteActionWithOptions(endpointsResource, c.ns, name, opts), &v1.Endpoints{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeEndpoints) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(endpointsResource, c.ns, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v1.EndpointsList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched endpoints.
+func (c *FakeEndpoints) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Endpoints, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(endpointsResource, c.ns, name, pt, data, subresources...), &v1.Endpoints{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.Endpoints), err
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_namespace.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_namespace.go
new file mode 100644
index 000000000..811546583
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_namespace.go
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeNamespaces implements NamespaceInterface
+type FakeNamespaces struct {
+ Fake *FakeCoreV1
+}
+
+var namespacesResource = v1.SchemeGroupVersion.WithResource("namespaces")
+
+var namespacesKind = v1.SchemeGroupVersion.WithKind("Namespace")
+
+// Get takes name of the namespace, and returns the corresponding namespace object, and an error if there is any.
+func (c *FakeNamespaces) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Namespace, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(namespacesResource, name), &v1.Namespace{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.Namespace), err
+}
+
+// List takes label and field selectors, and returns the list of Namespaces that match those selectors.
+func (c *FakeNamespaces) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NamespaceList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(namespacesResource, namespacesKind, opts), &v1.NamespaceList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1.NamespaceList{ListMeta: obj.(*v1.NamespaceList).ListMeta}
+ for _, item := range obj.(*v1.NamespaceList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested namespaces.
+func (c *FakeNamespaces) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(namespacesResource, opts))
+}
+
+// Create takes the representation of a namespace and creates it. Returns the server's representation of the namespace, and an error, if there is any.
+func (c *FakeNamespaces) Create(ctx context.Context, namespace *v1.Namespace, opts metav1.CreateOptions) (result *v1.Namespace, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(namespacesResource, namespace), &v1.Namespace{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.Namespace), err
+}
+
+// Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any.
+func (c *FakeNamespaces) Update(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (result *v1.Namespace, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(namespacesResource, namespace), &v1.Namespace{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.Namespace), err
+}
+
+// Delete takes name of the namespace and deletes it. Returns an error if one occurs.
+func (c *FakeNamespaces) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteActionWithOptions(namespacesResource, name, opts), &v1.Namespace{})
+ return err
+}
+
+// Patch applies the patch and returns the patched namespace.
+func (c *FakeNamespaces) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Namespace, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(namespacesResource, name, pt, data, subresources...), &v1.Namespace{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.Namespace), err
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_node.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_node.go
new file mode 100644
index 000000000..316a90842
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_node.go
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeNodes implements NodeInterface
+type FakeNodes struct {
+ Fake *FakeCoreV1
+}
+
+var nodesResource = v1.SchemeGroupVersion.WithResource("nodes")
+
+var nodesKind = v1.SchemeGroupVersion.WithKind("Node")
+
+// Get takes name of the node, and returns the corresponding node object, and an error if there is any.
+func (c *FakeNodes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Node, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(nodesResource, name), &v1.Node{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.Node), err
+}
+
+// List takes label and field selectors, and returns the list of Nodes that match those selectors.
+func (c *FakeNodes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NodeList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(nodesResource, nodesKind, opts), &v1.NodeList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1.NodeList{ListMeta: obj.(*v1.NodeList).ListMeta}
+ for _, item := range obj.(*v1.NodeList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested nodes.
+func (c *FakeNodes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(nodesResource, opts))
+}
+
+// Create takes the representation of a node and creates it. Returns the server's representation of the node, and an error, if there is any.
+func (c *FakeNodes) Create(ctx context.Context, node *v1.Node, opts metav1.CreateOptions) (result *v1.Node, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(nodesResource, node), &v1.Node{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.Node), err
+}
+
+// Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any.
+func (c *FakeNodes) Update(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (result *v1.Node, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(nodesResource, node), &v1.Node{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.Node), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeNodes) UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateSubresourceAction(nodesResource, "status", node), &v1.Node{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.Node), err
+}
+
+// Delete takes name of the node and deletes it. Returns an error if one occurs.
+func (c *FakeNodes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteActionWithOptions(nodesResource, name, opts), &v1.Node{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeNodes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(nodesResource, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v1.NodeList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched node.
+func (c *FakeNodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Node, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(nodesResource, name, pt, data, subresources...), &v1.Node{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.Node), err
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_pod.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_pod.go
new file mode 100644
index 000000000..dca3fe6f0
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_pod.go
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakePods implements PodInterface
+type FakePods struct {
+ Fake *FakeCoreV1
+ ns string
+}
+
+var podsResource = v1.SchemeGroupVersion.WithResource("pods")
+
+var podsKind = v1.SchemeGroupVersion.WithKind("Pod")
+
+// Get takes name of the pod, and returns the corresponding pod object, and an error if there is any.
+func (c *FakePods) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Pod, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(podsResource, c.ns, name), &v1.Pod{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.Pod), err
+}
+
+// List takes label and field selectors, and returns the list of Pods that match those selectors.
+func (c *FakePods) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(podsResource, podsKind, c.ns, opts), &v1.PodList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1.PodList{ListMeta: obj.(*v1.PodList).ListMeta}
+ for _, item := range obj.(*v1.PodList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested pods.
+func (c *FakePods) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(podsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a pod and creates it. Returns the server's representation of the pod, and an error, if there is any.
+func (c *FakePods) Create(ctx context.Context, pod *v1.Pod, opts metav1.CreateOptions) (result *v1.Pod, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(podsResource, c.ns, pod), &v1.Pod{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.Pod), err
+}
+
+// Update takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any.
+func (c *FakePods) Update(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(podsResource, c.ns, pod), &v1.Pod{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.Pod), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakePods) UpdateStatus(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(podsResource, "status", c.ns, pod), &v1.Pod{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.Pod), err
+}
+
+// Delete takes name of the pod and deletes it. Returns an error if one occurs.
+func (c *FakePods) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteActionWithOptions(podsResource, c.ns, name, opts), &v1.Pod{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakePods) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(podsResource, c.ns, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v1.PodList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched pod.
+func (c *FakePods) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Pod, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(podsResource, c.ns, name, pt, data, subresources...), &v1.Pod{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.Pod), err
+}
+
+// UpdateEphemeralContainers takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any.
+func (c *FakePods) UpdateEphemeralContainers(ctx context.Context, podName string, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(podsResource, "ephemeralcontainers", c.ns, pod), &v1.Pod{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.Pod), err
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_secret.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_secret.go
new file mode 100644
index 000000000..964116565
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_secret.go
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeSecrets implements SecretInterface
+type FakeSecrets struct {
+ Fake *FakeCoreV1
+ ns string
+}
+
+var secretsResource = v1.SchemeGroupVersion.WithResource("secrets")
+
+var secretsKind = v1.SchemeGroupVersion.WithKind("Secret")
+
+// Get takes name of the secret, and returns the corresponding secret object, and an error if there is any.
+func (c *FakeSecrets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Secret, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(secretsResource, c.ns, name), &v1.Secret{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.Secret), err
+}
+
+// List takes label and field selectors, and returns the list of Secrets that match those selectors.
+func (c *FakeSecrets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.SecretList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(secretsResource, secretsKind, c.ns, opts), &v1.SecretList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1.SecretList{ListMeta: obj.(*v1.SecretList).ListMeta}
+ for _, item := range obj.(*v1.SecretList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested secrets.
+func (c *FakeSecrets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(secretsResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a secret and creates it. Returns the server's representation of the secret, and an error, if there is any.
+func (c *FakeSecrets) Create(ctx context.Context, secret *v1.Secret, opts metav1.CreateOptions) (result *v1.Secret, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(secretsResource, c.ns, secret), &v1.Secret{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.Secret), err
+}
+
+// Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any.
+func (c *FakeSecrets) Update(ctx context.Context, secret *v1.Secret, opts metav1.UpdateOptions) (result *v1.Secret, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(secretsResource, c.ns, secret), &v1.Secret{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.Secret), err
+}
+
+// Delete takes name of the secret and deletes it. Returns an error if one occurs.
+func (c *FakeSecrets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteActionWithOptions(secretsResource, c.ns, name, opts), &v1.Secret{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeSecrets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(secretsResource, c.ns, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v1.SecretList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched secret.
+func (c *FakeSecrets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Secret, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(secretsResource, c.ns, name, pt, data, subresources...), &v1.Secret{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.Secret), err
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_service.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_service.go
new file mode 100644
index 000000000..a68cb6067
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/fake/fake_service.go
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeServices implements ServiceInterface
+type FakeServices struct {
+ Fake *FakeCoreV1
+ ns string
+}
+
+var servicesResource = v1.SchemeGroupVersion.WithResource("services")
+
+var servicesKind = v1.SchemeGroupVersion.WithKind("Service")
+
+// Get takes name of the service, and returns the corresponding service object, and an error if there is any.
+func (c *FakeServices) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Service, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(servicesResource, c.ns, name), &v1.Service{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.Service), err
+}
+
+// List takes label and field selectors, and returns the list of Services that match those selectors.
+func (c *FakeServices) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ServiceList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(servicesResource, servicesKind, c.ns, opts), &v1.ServiceList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1.ServiceList{ListMeta: obj.(*v1.ServiceList).ListMeta}
+ for _, item := range obj.(*v1.ServiceList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested services.
+func (c *FakeServices) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(servicesResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any.
+func (c *FakeServices) Create(ctx context.Context, service *v1.Service, opts metav1.CreateOptions) (result *v1.Service, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(servicesResource, c.ns, service), &v1.Service{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.Service), err
+}
+
+// Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any.
+func (c *FakeServices) Update(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (result *v1.Service, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(servicesResource, c.ns, service), &v1.Service{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.Service), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeServices) UpdateStatus(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (*v1.Service, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(servicesResource, "status", c.ns, service), &v1.Service{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.Service), err
+}
+
+// Delete takes name of the service and deletes it. Returns an error if one occurs.
+func (c *FakeServices) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteActionWithOptions(servicesResource, c.ns, name, opts), &v1.Service{})
+
+ return err
+}
+
+// Patch applies the patch and returns the patched service.
+func (c *FakeServices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Service, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(servicesResource, c.ns, name, pt, data, subresources...), &v1.Service{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.Service), err
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/generated_expansion.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/generated_expansion.go
new file mode 100644
index 000000000..4245920ba
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/generated_expansion.go
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+type EndpointsExpansion interface{}
+
+type NamespaceExpansion interface{}
+
+type NodeExpansion interface{}
+
+type PodExpansion interface{}
+
+type SecretExpansion interface{}
+
+type ServiceExpansion interface{}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/namespace.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/namespace.go
new file mode 100644
index 000000000..7fea2416d
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/namespace.go
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ "time"
+
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
+ scheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// NamespacesGetter has a method to return a NamespaceInterface.
+// A group's client should implement this interface.
+type NamespacesGetter interface {
+ Namespaces() NamespaceInterface
+}
+
+// NamespaceInterface has methods to work with Namespace resources.
+type NamespaceInterface interface {
+ Create(ctx context.Context, namespace *v1.Namespace, opts metav1.CreateOptions) (*v1.Namespace, error)
+ Update(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (*v1.Namespace, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Namespace, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.NamespaceList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Namespace, err error)
+ NamespaceExpansion
+}
+
+// namespaces implements NamespaceInterface
+type namespaces struct {
+ client rest.Interface
+}
+
+// newNamespaces returns a Namespaces
+func newNamespaces(c *CoreV1Client) *namespaces {
+ return &namespaces{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the namespace, and returns the corresponding namespace object, and an error if there is any.
+func (c *namespaces) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Namespace, err error) {
+ result = &v1.Namespace{}
+ err = c.client.Get().
+ Resource("namespaces").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Namespaces that match those selectors.
+func (c *namespaces) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NamespaceList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.NamespaceList{}
+ err = c.client.Get().
+ Resource("namespaces").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested namespaces.
+func (c *namespaces) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("namespaces").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a namespace and creates it. Returns the server's representation of the namespace, and an error, if there is any.
+func (c *namespaces) Create(ctx context.Context, namespace *v1.Namespace, opts metav1.CreateOptions) (result *v1.Namespace, err error) {
+ result = &v1.Namespace{}
+ err = c.client.Post().
+ Resource("namespaces").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(namespace).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any.
+func (c *namespaces) Update(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (result *v1.Namespace, err error) {
+ result = &v1.Namespace{}
+ err = c.client.Put().
+ Resource("namespaces").
+ Name(namespace.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(namespace).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the namespace and deletes it. Returns an error if one occurs.
+func (c *namespaces) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("namespaces").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched namespace.
+func (c *namespaces) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Namespace, err error) {
+ result = &v1.Namespace{}
+ err = c.client.Patch(pt).
+ Resource("namespaces").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/node.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/node.go
new file mode 100644
index 000000000..6d2e916cf
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/node.go
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ "time"
+
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
+ scheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// NodesGetter has a method to return a NodeInterface.
+// A group's client should implement this interface.
+type NodesGetter interface {
+ Nodes() NodeInterface
+}
+
+// NodeInterface has methods to work with Node resources.
+type NodeInterface interface {
+ Create(ctx context.Context, node *v1.Node, opts metav1.CreateOptions) (*v1.Node, error)
+ Update(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error)
+ UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Node, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.NodeList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Node, err error)
+ NodeExpansion
+}
+
+// nodes implements NodeInterface
+type nodes struct {
+ client rest.Interface
+}
+
+// newNodes returns a Nodes
+func newNodes(c *CoreV1Client) *nodes {
+ return &nodes{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the node, and returns the corresponding node object, and an error if there is any.
+func (c *nodes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Node, err error) {
+ result = &v1.Node{}
+ err = c.client.Get().
+ Resource("nodes").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Nodes that match those selectors.
+func (c *nodes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NodeList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.NodeList{}
+ err = c.client.Get().
+ Resource("nodes").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested nodes.
+func (c *nodes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("nodes").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a node and creates it. Returns the server's representation of the node, and an error, if there is any.
+func (c *nodes) Create(ctx context.Context, node *v1.Node, opts metav1.CreateOptions) (result *v1.Node, err error) {
+ result = &v1.Node{}
+ err = c.client.Post().
+ Resource("nodes").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(node).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any.
+func (c *nodes) Update(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (result *v1.Node, err error) {
+ result = &v1.Node{}
+ err = c.client.Put().
+ Resource("nodes").
+ Name(node.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(node).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *nodes) UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (result *v1.Node, err error) {
+ result = &v1.Node{}
+ err = c.client.Put().
+ Resource("nodes").
+ Name(node.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(node).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the node and deletes it. Returns an error if one occurs.
+func (c *nodes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("nodes").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *nodes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("nodes").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched node.
+func (c *nodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Node, err error) {
+ result = &v1.Node{}
+ err = c.client.Patch(pt).
+ Resource("nodes").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/pod.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/pod.go
new file mode 100644
index 000000000..f46d4d7c3
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/pod.go
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ "time"
+
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
+ scheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// PodsGetter has a method to return a PodInterface.
+// A group's client should implement this interface.
+type PodsGetter interface {
+ Pods(namespace string) PodInterface
+}
+
+// PodInterface has methods to work with Pod resources.
+type PodInterface interface {
+ Create(ctx context.Context, pod *v1.Pod, opts metav1.CreateOptions) (*v1.Pod, error)
+ Update(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error)
+ UpdateStatus(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Pod, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.PodList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Pod, err error)
+ UpdateEphemeralContainers(ctx context.Context, podName string, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error)
+
+ PodExpansion
+}
+
+// pods implements PodInterface
+type pods struct {
+ client rest.Interface
+ ns string
+}
+
+// newPods returns a Pods
+func newPods(c *CoreV1Client, namespace string) *pods {
+ return &pods{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the pod, and returns the corresponding pod object, and an error if there is any.
+func (c *pods) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Pod, err error) {
+ result = &v1.Pod{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("pods").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Pods that match those selectors.
+func (c *pods) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.PodList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("pods").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested pods.
+func (c *pods) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("pods").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a pod and creates it. Returns the server's representation of the pod, and an error, if there is any.
+func (c *pods) Create(ctx context.Context, pod *v1.Pod, opts metav1.CreateOptions) (result *v1.Pod, err error) {
+ result = &v1.Pod{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("pods").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(pod).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any.
+func (c *pods) Update(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) {
+ result = &v1.Pod{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("pods").
+ Name(pod.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(pod).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *pods) UpdateStatus(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) {
+ result = &v1.Pod{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("pods").
+ Name(pod.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(pod).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the pod and deletes it. Returns an error if one occurs.
+func (c *pods) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("pods").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *pods) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("pods").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched pod.
+func (c *pods) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Pod, err error) {
+ result = &v1.Pod{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("pods").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateEphemeralContainers takes the top resource name and the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any.
+func (c *pods) UpdateEphemeralContainers(ctx context.Context, podName string, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) {
+ result = &v1.Pod{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("pods").
+ Name(podName).
+ SubResource("ephemeralcontainers").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(pod).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/secret.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/secret.go
new file mode 100644
index 000000000..ccbf30c16
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/secret.go
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ "time"
+
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
+ scheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// SecretsGetter has a method to return a SecretInterface.
+// A group's client should implement this interface.
+type SecretsGetter interface {
+ Secrets(namespace string) SecretInterface
+}
+
+// SecretInterface has methods to work with Secret resources.
+type SecretInterface interface {
+ Create(ctx context.Context, secret *v1.Secret, opts metav1.CreateOptions) (*v1.Secret, error)
+ Update(ctx context.Context, secret *v1.Secret, opts metav1.UpdateOptions) (*v1.Secret, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Secret, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.SecretList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Secret, err error)
+ SecretExpansion
+}
+
+// secrets implements SecretInterface
+type secrets struct {
+ client rest.Interface
+ ns string
+}
+
+// newSecrets returns a Secrets
+func newSecrets(c *CoreV1Client, namespace string) *secrets {
+ return &secrets{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the secret, and returns the corresponding secret object, and an error if there is any.
+func (c *secrets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Secret, err error) {
+ result = &v1.Secret{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("secrets").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Secrets that match those selectors.
+func (c *secrets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.SecretList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.SecretList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("secrets").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested secrets.
+func (c *secrets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("secrets").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a secret and creates it. Returns the server's representation of the secret, and an error, if there is any.
+func (c *secrets) Create(ctx context.Context, secret *v1.Secret, opts metav1.CreateOptions) (result *v1.Secret, err error) {
+ result = &v1.Secret{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("secrets").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(secret).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any.
+func (c *secrets) Update(ctx context.Context, secret *v1.Secret, opts metav1.UpdateOptions) (result *v1.Secret, err error) {
+ result = &v1.Secret{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("secrets").
+ Name(secret.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(secret).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the secret and deletes it. Returns an error if one occurs.
+func (c *secrets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("secrets").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *secrets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("secrets").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched secret.
+func (c *secrets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Secret, err error) {
+ result = &v1.Secret{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("secrets").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/service.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/service.go
new file mode 100644
index 000000000..b981e1979
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1/service.go
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ "time"
+
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
+ scheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// ServicesGetter has a method to return a ServiceInterface.
+// A group's client should implement this interface.
+type ServicesGetter interface {
+ Services(namespace string) ServiceInterface
+}
+
+// ServiceInterface has methods to work with Service resources.
+type ServiceInterface interface {
+ Create(ctx context.Context, service *v1.Service, opts metav1.CreateOptions) (*v1.Service, error)
+ Update(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (*v1.Service, error)
+ UpdateStatus(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (*v1.Service, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Service, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.ServiceList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Service, err error)
+ ServiceExpansion
+}
+
+// services implements ServiceInterface
+type services struct {
+ client rest.Interface
+ ns string
+}
+
+// newServices returns a Services
+func newServices(c *CoreV1Client, namespace string) *services {
+ return &services{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the service, and returns the corresponding service object, and an error if there is any.
+func (c *services) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Service, err error) {
+ result = &v1.Service{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("services").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Services that match those selectors.
+func (c *services) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ServiceList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.ServiceList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("services").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested services.
+func (c *services) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("services").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any.
+func (c *services) Create(ctx context.Context, service *v1.Service, opts metav1.CreateOptions) (result *v1.Service, err error) {
+ result = &v1.Service{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("services").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(service).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any.
+func (c *services) Update(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (result *v1.Service, err error) {
+ result = &v1.Service{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("services").
+ Name(service.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(service).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *services) UpdateStatus(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (result *v1.Service, err error) {
+ result = &v1.Service{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("services").
+ Name(service.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(service).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the service and deletes it. Returns an error if one occurs.
+func (c *services) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("services").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched service.
+func (c *services) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Service, err error) {
+ result = &v1.Service{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("services").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/discovery_client.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/discovery_client.go
new file mode 100644
index 000000000..ca9ef1187
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/discovery_client.go
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "net/http"
+
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1"
+ "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme"
+ rest "k8s.io/client-go/rest"
+)
+
+type DiscoveryV1Interface interface {
+ RESTClient() rest.Interface
+ EndpointSlicesGetter
+}
+
+// DiscoveryV1Client is used to interact with features provided by the discovery.k8s.io group.
+type DiscoveryV1Client struct {
+ restClient rest.Interface
+}
+
+func (c *DiscoveryV1Client) EndpointSlices(namespace string) EndpointSliceInterface {
+ return newEndpointSlices(c, namespace)
+}
+
+// NewForConfig creates a new DiscoveryV1Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
+func NewForConfig(c *rest.Config) (*DiscoveryV1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ httpClient, err := rest.HTTPClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new DiscoveryV1Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*DiscoveryV1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := rest.RESTClientForConfigAndClient(&config, h)
+ if err != nil {
+ return nil, err
+ }
+ return &DiscoveryV1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new DiscoveryV1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *DiscoveryV1Client {
+ client, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+// New creates a new DiscoveryV1Client for the given RESTClient.
+func New(c rest.Interface) *DiscoveryV1Client {
+ return &DiscoveryV1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+ gv := v1.SchemeGroupVersion
+ config.GroupVersion = &gv
+ config.APIPath = "/apis"
+ config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *DiscoveryV1Client) RESTClient() rest.Interface {
+ if c == nil {
+ return nil
+ }
+ return c.restClient
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/doc.go
new file mode 100644
index 000000000..50cfbd485
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/doc.go
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/endpointslice.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/endpointslice.go
new file mode 100644
index 000000000..cfb4ab528
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/endpointslice.go
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ "time"
+
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1"
+ scheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// EndpointSlicesGetter has a method to return a EndpointSliceInterface.
+// A group's client should implement this interface.
+type EndpointSlicesGetter interface {
+ EndpointSlices(namespace string) EndpointSliceInterface
+}
+
+// EndpointSliceInterface has methods to work with EndpointSlice resources.
+type EndpointSliceInterface interface {
+ Create(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.CreateOptions) (*v1.EndpointSlice, error)
+ Update(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.UpdateOptions) (*v1.EndpointSlice, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.EndpointSlice, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.EndpointSliceList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.EndpointSlice, err error)
+ EndpointSliceExpansion
+}
+
+// endpointSlices implements EndpointSliceInterface
+type endpointSlices struct {
+ client rest.Interface
+ ns string
+}
+
+// newEndpointSlices returns a EndpointSlices
+func newEndpointSlices(c *DiscoveryV1Client, namespace string) *endpointSlices {
+ return &endpointSlices{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any.
+func (c *endpointSlices) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.EndpointSlice, err error) {
+ result = &v1.EndpointSlice{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("endpointslices").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of EndpointSlices that match those selectors.
+func (c *endpointSlices) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EndpointSliceList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.EndpointSliceList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("endpointslices").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested endpointSlices.
+func (c *endpointSlices) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("endpointslices").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any.
+func (c *endpointSlices) Create(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.CreateOptions) (result *v1.EndpointSlice, err error) {
+ result = &v1.EndpointSlice{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("endpointslices").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(endpointSlice).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any.
+func (c *endpointSlices) Update(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.UpdateOptions) (result *v1.EndpointSlice, err error) {
+ result = &v1.EndpointSlice{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("endpointslices").
+ Name(endpointSlice.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(endpointSlice).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs.
+func (c *endpointSlices) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("endpointslices").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *endpointSlices) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("endpointslices").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched endpointSlice.
+func (c *endpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.EndpointSlice, err error) {
+ result = &v1.EndpointSlice{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("endpointslices").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/fake/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/fake/doc.go
new file mode 100644
index 000000000..57bd090ef
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/fake/doc.go
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/fake/fake_discovery_client.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/fake/fake_discovery_client.go
new file mode 100644
index 000000000..0cee06b99
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/fake/fake_discovery_client.go
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeDiscoveryV1 struct {
+ *testing.Fake
+}
+
+func (c *FakeDiscoveryV1) EndpointSlices(namespace string) v1.EndpointSliceInterface {
+ return &FakeEndpointSlices{c, namespace}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeDiscoveryV1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/fake/fake_endpointslice.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/fake/fake_endpointslice.go
new file mode 100644
index 000000000..d04fdcf47
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/fake/fake_endpointslice.go
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeEndpointSlices implements EndpointSliceInterface
+type FakeEndpointSlices struct {
+ Fake *FakeDiscoveryV1
+ ns string
+}
+
+var endpointslicesResource = v1.SchemeGroupVersion.WithResource("endpointslices")
+
+var endpointslicesKind = v1.SchemeGroupVersion.WithKind("EndpointSlice")
+
+// Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any.
+func (c *FakeEndpointSlices) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.EndpointSlice, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(endpointslicesResource, c.ns, name), &v1.EndpointSlice{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.EndpointSlice), err
+}
+
+// List takes label and field selectors, and returns the list of EndpointSlices that match those selectors.
+func (c *FakeEndpointSlices) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EndpointSliceList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(endpointslicesResource, endpointslicesKind, c.ns, opts), &v1.EndpointSliceList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1.EndpointSliceList{ListMeta: obj.(*v1.EndpointSliceList).ListMeta}
+ for _, item := range obj.(*v1.EndpointSliceList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested endpointSlices.
+func (c *FakeEndpointSlices) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(endpointslicesResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any.
+func (c *FakeEndpointSlices) Create(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.CreateOptions) (result *v1.EndpointSlice, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(endpointslicesResource, c.ns, endpointSlice), &v1.EndpointSlice{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.EndpointSlice), err
+}
+
+// Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any.
+func (c *FakeEndpointSlices) Update(ctx context.Context, endpointSlice *v1.EndpointSlice, opts metav1.UpdateOptions) (result *v1.EndpointSlice, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(endpointslicesResource, c.ns, endpointSlice), &v1.EndpointSlice{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.EndpointSlice), err
+}
+
+// Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs.
+func (c *FakeEndpointSlices) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteActionWithOptions(endpointslicesResource, c.ns, name, opts), &v1.EndpointSlice{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeEndpointSlices) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(endpointslicesResource, c.ns, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v1.EndpointSliceList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched endpointSlice.
+func (c *FakeEndpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.EndpointSlice, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(endpointslicesResource, c.ns, name, pt, data, subresources...), &v1.EndpointSlice{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.EndpointSlice), err
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/generated_expansion.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/generated_expansion.go
new file mode 100644
index 000000000..7295b601e
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1/generated_expansion.go
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+type EndpointSliceExpansion interface{}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/discovery_client.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/discovery_client.go
new file mode 100644
index 000000000..d2d556581
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/discovery_client.go
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ "net/http"
+
+ v1beta1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1"
+ "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme"
+ rest "k8s.io/client-go/rest"
+)
+
+type DiscoveryV1beta1Interface interface {
+ RESTClient() rest.Interface
+ EndpointSlicesGetter
+}
+
+// DiscoveryV1beta1Client is used to interact with features provided by the discovery.k8s.io group.
+type DiscoveryV1beta1Client struct {
+ restClient rest.Interface
+}
+
+func (c *DiscoveryV1beta1Client) EndpointSlices(namespace string) EndpointSliceInterface {
+ return newEndpointSlices(c, namespace)
+}
+
+// NewForConfig creates a new DiscoveryV1beta1Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
+func NewForConfig(c *rest.Config) (*DiscoveryV1beta1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ httpClient, err := rest.HTTPClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new DiscoveryV1beta1Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*DiscoveryV1beta1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := rest.RESTClientForConfigAndClient(&config, h)
+ if err != nil {
+ return nil, err
+ }
+ return &DiscoveryV1beta1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new DiscoveryV1beta1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *DiscoveryV1beta1Client {
+ client, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+// New creates a new DiscoveryV1beta1Client for the given RESTClient.
+func New(c rest.Interface) *DiscoveryV1beta1Client {
+ return &DiscoveryV1beta1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+ gv := v1beta1.SchemeGroupVersion
+ config.GroupVersion = &gv
+ config.APIPath = "/apis"
+ config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *DiscoveryV1beta1Client) RESTClient() rest.Interface {
+ if c == nil {
+ return nil
+ }
+ return c.restClient
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/doc.go
new file mode 100644
index 000000000..07e23ed4a
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/doc.go
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1beta1
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/endpointslice.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/endpointslice.go
new file mode 100644
index 000000000..a77bbb97e
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/endpointslice.go
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ "context"
+ "time"
+
+ v1beta1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1"
+ scheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// EndpointSlicesGetter has a method to return a EndpointSliceInterface.
+// A group's client should implement this interface.
+type EndpointSlicesGetter interface {
+ EndpointSlices(namespace string) EndpointSliceInterface
+}
+
+// EndpointSliceInterface has methods to work with EndpointSlice resources.
+type EndpointSliceInterface interface {
+ Create(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.CreateOptions) (*v1beta1.EndpointSlice, error)
+ Update(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.UpdateOptions) (*v1beta1.EndpointSlice, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.EndpointSlice, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1beta1.EndpointSliceList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.EndpointSlice, err error)
+ EndpointSliceExpansion
+}
+
+// endpointSlices implements EndpointSliceInterface
+type endpointSlices struct {
+ client rest.Interface
+ ns string
+}
+
+// newEndpointSlices returns a EndpointSlices
+func newEndpointSlices(c *DiscoveryV1beta1Client, namespace string) *endpointSlices {
+ return &endpointSlices{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any.
+func (c *endpointSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.EndpointSlice, err error) {
+ result = &v1beta1.EndpointSlice{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("endpointslices").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of EndpointSlices that match those selectors.
+func (c *endpointSlices) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.EndpointSliceList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1beta1.EndpointSliceList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("endpointslices").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested endpointSlices.
+func (c *endpointSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("endpointslices").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any.
+func (c *endpointSlices) Create(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.CreateOptions) (result *v1beta1.EndpointSlice, err error) {
+ result = &v1beta1.EndpointSlice{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("endpointslices").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(endpointSlice).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any.
+func (c *endpointSlices) Update(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.UpdateOptions) (result *v1beta1.EndpointSlice, err error) {
+ result = &v1beta1.EndpointSlice{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("endpointslices").
+ Name(endpointSlice.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(endpointSlice).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs.
+func (c *endpointSlices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("endpointslices").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *endpointSlices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("endpointslices").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched endpointSlice.
+func (c *endpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.EndpointSlice, err error) {
+ result = &v1beta1.EndpointSlice{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("endpointslices").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/fake/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/fake/doc.go
new file mode 100644
index 000000000..57bd090ef
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/fake/doc.go
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/fake/fake_discovery_client.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/fake/fake_discovery_client.go
new file mode 100644
index 000000000..a1d258d80
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/fake/fake_discovery_client.go
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1beta1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeDiscoveryV1beta1 struct {
+ *testing.Fake
+}
+
+func (c *FakeDiscoveryV1beta1) EndpointSlices(namespace string) v1beta1.EndpointSliceInterface {
+ return &FakeEndpointSlices{c, namespace}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeDiscoveryV1beta1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/fake/fake_endpointslice.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/fake/fake_endpointslice.go
new file mode 100644
index 000000000..ddc55497d
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/fake/fake_endpointslice.go
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v1beta1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/discovery/v1beta1"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeEndpointSlices implements EndpointSliceInterface
+type FakeEndpointSlices struct {
+ Fake *FakeDiscoveryV1beta1
+ ns string
+}
+
+var endpointslicesResource = v1beta1.SchemeGroupVersion.WithResource("endpointslices")
+
+var endpointslicesKind = v1beta1.SchemeGroupVersion.WithKind("EndpointSlice")
+
+// Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any.
+func (c *FakeEndpointSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.EndpointSlice, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(endpointslicesResource, c.ns, name), &v1beta1.EndpointSlice{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.EndpointSlice), err
+}
+
+// List takes label and field selectors, and returns the list of EndpointSlices that match those selectors.
+func (c *FakeEndpointSlices) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.EndpointSliceList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(endpointslicesResource, endpointslicesKind, c.ns, opts), &v1beta1.EndpointSliceList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1beta1.EndpointSliceList{ListMeta: obj.(*v1beta1.EndpointSliceList).ListMeta}
+ for _, item := range obj.(*v1beta1.EndpointSliceList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested endpointSlices.
+func (c *FakeEndpointSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(endpointslicesResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a endpointSlice and creates it. Returns the server's representation of the endpointSlice, and an error, if there is any.
+func (c *FakeEndpointSlices) Create(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.CreateOptions) (result *v1beta1.EndpointSlice, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(endpointslicesResource, c.ns, endpointSlice), &v1beta1.EndpointSlice{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.EndpointSlice), err
+}
+
+// Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any.
+func (c *FakeEndpointSlices) Update(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.UpdateOptions) (result *v1beta1.EndpointSlice, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(endpointslicesResource, c.ns, endpointSlice), &v1beta1.EndpointSlice{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.EndpointSlice), err
+}
+
+// Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs.
+func (c *FakeEndpointSlices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteActionWithOptions(endpointslicesResource, c.ns, name, opts), &v1beta1.EndpointSlice{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeEndpointSlices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(endpointslicesResource, c.ns, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v1beta1.EndpointSliceList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched endpointSlice.
+func (c *FakeEndpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.EndpointSlice, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(endpointslicesResource, c.ns, name, pt, data, subresources...), &v1beta1.EndpointSlice{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1beta1.EndpointSlice), err
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/generated_expansion.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/generated_expansion.go
new file mode 100644
index 000000000..1aa54bf9b
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/discovery/v1beta1/generated_expansion.go
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+type EndpointSliceExpansion interface{}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/doc.go
new file mode 100644
index 000000000..50cfbd485
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/doc.go
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/fake/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/fake/doc.go
new file mode 100644
index 000000000..57bd090ef
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/fake/doc.go
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// Package fake has the automatically generated clients.
+package fake
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/fake/fake_ingress.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/fake/fake_ingress.go
new file mode 100644
index 000000000..c32e6bec4
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/fake/fake_ingress.go
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeIngresses implements IngressInterface
+type FakeIngresses struct {
+ Fake *FakeNetworkingV1
+ ns string
+}
+
+var ingressesResource = v1.SchemeGroupVersion.WithResource("ingresses")
+
+var ingressesKind = v1.SchemeGroupVersion.WithKind("Ingress")
+
+// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any.
+func (c *FakeIngresses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Ingress, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(ingressesResource, c.ns, name), &v1.Ingress{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.Ingress), err
+}
+
+// List takes label and field selectors, and returns the list of Ingresses that match those selectors.
+func (c *FakeIngresses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.IngressList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(ingressesResource, ingressesKind, c.ns, opts), &v1.IngressList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1.IngressList{ListMeta: obj.(*v1.IngressList).ListMeta}
+ for _, item := range obj.(*v1.IngressList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested ingresses.
+func (c *FakeIngresses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(ingressesResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any.
+func (c *FakeIngresses) Create(ctx context.Context, ingress *v1.Ingress, opts metav1.CreateOptions) (result *v1.Ingress, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(ingressesResource, c.ns, ingress), &v1.Ingress{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.Ingress), err
+}
+
+// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any.
+func (c *FakeIngresses) Update(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (result *v1.Ingress, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(ingressesResource, c.ns, ingress), &v1.Ingress{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.Ingress), err
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *FakeIngresses) UpdateStatus(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (*v1.Ingress, error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateSubresourceAction(ingressesResource, "status", c.ns, ingress), &v1.Ingress{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.Ingress), err
+}
+
+// Delete takes name of the ingress and deletes it. Returns an error if one occurs.
+func (c *FakeIngresses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteActionWithOptions(ingressesResource, c.ns, name, opts), &v1.Ingress{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeIngresses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(ingressesResource, c.ns, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v1.IngressList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched ingress.
+func (c *FakeIngresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Ingress, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(ingressesResource, c.ns, name, pt, data, subresources...), &v1.Ingress{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.Ingress), err
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/fake/fake_ingressclass.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/fake/fake_ingressclass.go
new file mode 100644
index 000000000..9a66ab833
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/fake/fake_ingressclass.go
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeIngressClasses implements IngressClassInterface
+type FakeIngressClasses struct {
+ Fake *FakeNetworkingV1
+}
+
+var ingressclassesResource = v1.SchemeGroupVersion.WithResource("ingressclasses")
+
+var ingressclassesKind = v1.SchemeGroupVersion.WithKind("IngressClass")
+
+// Get takes name of the ingressClass, and returns the corresponding ingressClass object, and an error if there is any.
+func (c *FakeIngressClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.IngressClass, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootGetAction(ingressclassesResource, name), &v1.IngressClass{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.IngressClass), err
+}
+
+// List takes label and field selectors, and returns the list of IngressClasses that match those selectors.
+func (c *FakeIngressClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.IngressClassList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootListAction(ingressclassesResource, ingressclassesKind, opts), &v1.IngressClassList{})
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1.IngressClassList{ListMeta: obj.(*v1.IngressClassList).ListMeta}
+ for _, item := range obj.(*v1.IngressClassList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested ingressClasses.
+func (c *FakeIngressClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewRootWatchAction(ingressclassesResource, opts))
+}
+
+// Create takes the representation of a ingressClass and creates it. Returns the server's representation of the ingressClass, and an error, if there is any.
+func (c *FakeIngressClasses) Create(ctx context.Context, ingressClass *v1.IngressClass, opts metav1.CreateOptions) (result *v1.IngressClass, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootCreateAction(ingressclassesResource, ingressClass), &v1.IngressClass{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.IngressClass), err
+}
+
+// Update takes the representation of a ingressClass and updates it. Returns the server's representation of the ingressClass, and an error, if there is any.
+func (c *FakeIngressClasses) Update(ctx context.Context, ingressClass *v1.IngressClass, opts metav1.UpdateOptions) (result *v1.IngressClass, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootUpdateAction(ingressclassesResource, ingressClass), &v1.IngressClass{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.IngressClass), err
+}
+
+// Delete takes name of the ingressClass and deletes it. Returns an error if one occurs.
+func (c *FakeIngressClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewRootDeleteActionWithOptions(ingressclassesResource, name, opts), &v1.IngressClass{})
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeIngressClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ action := testing.NewRootDeleteCollectionAction(ingressclassesResource, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v1.IngressClassList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched ingressClass.
+func (c *FakeIngressClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.IngressClass, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewRootPatchSubresourceAction(ingressclassesResource, name, pt, data, subresources...), &v1.IngressClass{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.IngressClass), err
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/fake/fake_networking_client.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/fake/fake_networking_client.go
new file mode 100644
index 000000000..016a75e89
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/fake/fake_networking_client.go
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1"
+ rest "k8s.io/client-go/rest"
+ testing "k8s.io/client-go/testing"
+)
+
+type FakeNetworkingV1 struct {
+ *testing.Fake
+}
+
+func (c *FakeNetworkingV1) Ingresses(namespace string) v1.IngressInterface {
+ return &FakeIngresses{c, namespace}
+}
+
+func (c *FakeNetworkingV1) IngressClasses() v1.IngressClassInterface {
+ return &FakeIngressClasses{c}
+}
+
+func (c *FakeNetworkingV1) NetworkPolicies(namespace string) v1.NetworkPolicyInterface {
+ return &FakeNetworkPolicies{c, namespace}
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FakeNetworkingV1) RESTClient() rest.Interface {
+ var ret *rest.RESTClient
+ return ret
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/fake/fake_networkpolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/fake/fake_networkpolicy.go
new file mode 100644
index 000000000..e51d9f367
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/fake/fake_networkpolicy.go
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package fake
+
+import (
+ "context"
+
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ labels "k8s.io/apimachinery/pkg/labels"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ testing "k8s.io/client-go/testing"
+)
+
+// FakeNetworkPolicies implements NetworkPolicyInterface
+type FakeNetworkPolicies struct {
+ Fake *FakeNetworkingV1
+ ns string
+}
+
+var networkpoliciesResource = v1.SchemeGroupVersion.WithResource("networkpolicies")
+
+var networkpoliciesKind = v1.SchemeGroupVersion.WithKind("NetworkPolicy")
+
+// Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any.
+func (c *FakeNetworkPolicies) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.NetworkPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewGetAction(networkpoliciesResource, c.ns, name), &v1.NetworkPolicy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.NetworkPolicy), err
+}
+
+// List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors.
+func (c *FakeNetworkPolicies) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NetworkPolicyList, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewListAction(networkpoliciesResource, networkpoliciesKind, c.ns, opts), &v1.NetworkPolicyList{})
+
+ if obj == nil {
+ return nil, err
+ }
+
+ label, _, _ := testing.ExtractFromListOptions(opts)
+ if label == nil {
+ label = labels.Everything()
+ }
+ list := &v1.NetworkPolicyList{ListMeta: obj.(*v1.NetworkPolicyList).ListMeta}
+ for _, item := range obj.(*v1.NetworkPolicyList).Items {
+ if label.Matches(labels.Set(item.Labels)) {
+ list.Items = append(list.Items, item)
+ }
+ }
+ return list, err
+}
+
+// Watch returns a watch.Interface that watches the requested networkPolicies.
+func (c *FakeNetworkPolicies) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ return c.Fake.
+ InvokesWatch(testing.NewWatchAction(networkpoliciesResource, c.ns, opts))
+
+}
+
+// Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any.
+func (c *FakeNetworkPolicies) Create(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.CreateOptions) (result *v1.NetworkPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewCreateAction(networkpoliciesResource, c.ns, networkPolicy), &v1.NetworkPolicy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.NetworkPolicy), err
+}
+
+// Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any.
+func (c *FakeNetworkPolicies) Update(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (result *v1.NetworkPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewUpdateAction(networkpoliciesResource, c.ns, networkPolicy), &v1.NetworkPolicy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.NetworkPolicy), err
+}
+
+// Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs.
+func (c *FakeNetworkPolicies) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ _, err := c.Fake.
+ Invokes(testing.NewDeleteActionWithOptions(networkpoliciesResource, c.ns, name, opts), &v1.NetworkPolicy{})
+
+ return err
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *FakeNetworkPolicies) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ action := testing.NewDeleteCollectionAction(networkpoliciesResource, c.ns, listOpts)
+
+ _, err := c.Fake.Invokes(action, &v1.NetworkPolicyList{})
+ return err
+}
+
+// Patch applies the patch and returns the patched networkPolicy.
+func (c *FakeNetworkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.NetworkPolicy, err error) {
+ obj, err := c.Fake.
+ Invokes(testing.NewPatchSubresourceAction(networkpoliciesResource, c.ns, name, pt, data, subresources...), &v1.NetworkPolicy{})
+
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*v1.NetworkPolicy), err
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/generated_expansion.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/generated_expansion.go
new file mode 100644
index 000000000..dfd973ef0
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/generated_expansion.go
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+type IngressExpansion interface{}
+
+type IngressClassExpansion interface{}
+
+type NetworkPolicyExpansion interface{}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/ingress.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/ingress.go
new file mode 100644
index 000000000..60da69571
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/ingress.go
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ "time"
+
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1"
+ scheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// IngressesGetter has a method to return a IngressInterface.
+// A group's client should implement this interface.
+type IngressesGetter interface {
+ Ingresses(namespace string) IngressInterface
+}
+
+// IngressInterface has methods to work with Ingress resources.
+type IngressInterface interface {
+ Create(ctx context.Context, ingress *v1.Ingress, opts metav1.CreateOptions) (*v1.Ingress, error)
+ Update(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (*v1.Ingress, error)
+ UpdateStatus(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (*v1.Ingress, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Ingress, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.IngressList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Ingress, err error)
+ IngressExpansion
+}
+
+// ingresses implements IngressInterface
+type ingresses struct {
+ client rest.Interface
+ ns string
+}
+
+// newIngresses returns a Ingresses
+func newIngresses(c *NetworkingV1Client, namespace string) *ingresses {
+ return &ingresses{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any.
+func (c *ingresses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Ingress, err error) {
+ result = &v1.Ingress{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("ingresses").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Ingresses that match those selectors.
+func (c *ingresses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.IngressList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.IngressList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("ingresses").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested ingresses.
+func (c *ingresses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("ingresses").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any.
+func (c *ingresses) Create(ctx context.Context, ingress *v1.Ingress, opts metav1.CreateOptions) (result *v1.Ingress, err error) {
+ result = &v1.Ingress{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("ingresses").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ingress).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any.
+func (c *ingresses) Update(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (result *v1.Ingress, err error) {
+ result = &v1.Ingress{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("ingresses").
+ Name(ingress.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ingress).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *ingresses) UpdateStatus(ctx context.Context, ingress *v1.Ingress, opts metav1.UpdateOptions) (result *v1.Ingress, err error) {
+ result = &v1.Ingress{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("ingresses").
+ Name(ingress.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ingress).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the ingress and deletes it. Returns an error if one occurs.
+func (c *ingresses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("ingresses").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *ingresses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("ingresses").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched ingress.
+func (c *ingresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Ingress, err error) {
+ result = &v1.Ingress{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("ingresses").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/ingressclass.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/ingressclass.go
new file mode 100644
index 000000000..e84aedc45
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/ingressclass.go
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ "time"
+
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1"
+ scheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// IngressClassesGetter has a method to return a IngressClassInterface.
+// A group's client should implement this interface.
+type IngressClassesGetter interface {
+ IngressClasses() IngressClassInterface
+}
+
+// IngressClassInterface has methods to work with IngressClass resources.
+type IngressClassInterface interface {
+ Create(ctx context.Context, ingressClass *v1.IngressClass, opts metav1.CreateOptions) (*v1.IngressClass, error)
+ Update(ctx context.Context, ingressClass *v1.IngressClass, opts metav1.UpdateOptions) (*v1.IngressClass, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.IngressClass, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.IngressClassList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.IngressClass, err error)
+ IngressClassExpansion
+}
+
+// ingressClasses implements IngressClassInterface
+type ingressClasses struct {
+ client rest.Interface
+}
+
+// newIngressClasses returns a IngressClasses
+func newIngressClasses(c *NetworkingV1Client) *ingressClasses {
+ return &ingressClasses{
+ client: c.RESTClient(),
+ }
+}
+
+// Get takes name of the ingressClass, and returns the corresponding ingressClass object, and an error if there is any.
+func (c *ingressClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.IngressClass, err error) {
+ result = &v1.IngressClass{}
+ err = c.client.Get().
+ Resource("ingressclasses").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of IngressClasses that match those selectors.
+func (c *ingressClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.IngressClassList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.IngressClassList{}
+ err = c.client.Get().
+ Resource("ingressclasses").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested ingressClasses.
+func (c *ingressClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Resource("ingressclasses").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a ingressClass and creates it. Returns the server's representation of the ingressClass, and an error, if there is any.
+func (c *ingressClasses) Create(ctx context.Context, ingressClass *v1.IngressClass, opts metav1.CreateOptions) (result *v1.IngressClass, err error) {
+ result = &v1.IngressClass{}
+ err = c.client.Post().
+ Resource("ingressclasses").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ingressClass).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a ingressClass and updates it. Returns the server's representation of the ingressClass, and an error, if there is any.
+func (c *ingressClasses) Update(ctx context.Context, ingressClass *v1.IngressClass, opts metav1.UpdateOptions) (result *v1.IngressClass, err error) {
+ result = &v1.IngressClass{}
+ err = c.client.Put().
+ Resource("ingressclasses").
+ Name(ingressClass.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(ingressClass).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the ingressClass and deletes it. Returns an error if one occurs.
+func (c *ingressClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Resource("ingressclasses").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *ingressClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Resource("ingressclasses").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched ingressClass.
+func (c *ingressClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.IngressClass, err error) {
+ result = &v1.IngressClass{}
+ err = c.client.Patch(pt).
+ Resource("ingressclasses").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/networking_client.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/networking_client.go
new file mode 100644
index 000000000..6edb01afd
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/networking_client.go
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "net/http"
+
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1"
+ "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme"
+ rest "k8s.io/client-go/rest"
+)
+
+type NetworkingV1Interface interface {
+ RESTClient() rest.Interface
+ IngressesGetter
+ IngressClassesGetter
+ NetworkPoliciesGetter
+}
+
+// NetworkingV1Client is used to interact with features provided by the networking.k8s.io group.
+type NetworkingV1Client struct {
+ restClient rest.Interface
+}
+
+func (c *NetworkingV1Client) Ingresses(namespace string) IngressInterface {
+ return newIngresses(c, namespace)
+}
+
+func (c *NetworkingV1Client) IngressClasses() IngressClassInterface {
+ return newIngressClasses(c)
+}
+
+func (c *NetworkingV1Client) NetworkPolicies(namespace string) NetworkPolicyInterface {
+ return newNetworkPolicies(c, namespace)
+}
+
+// NewForConfig creates a new NetworkingV1Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
+func NewForConfig(c *rest.Config) (*NetworkingV1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ httpClient, err := rest.HTTPClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new NetworkingV1Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*NetworkingV1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := rest.RESTClientForConfigAndClient(&config, h)
+ if err != nil {
+ return nil, err
+ }
+ return &NetworkingV1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new NetworkingV1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *NetworkingV1Client {
+ client, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+// New creates a new NetworkingV1Client for the given RESTClient.
+func New(c rest.Interface) *NetworkingV1Client {
+ return &NetworkingV1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+ gv := v1.SchemeGroupVersion
+ config.GroupVersion = &gv
+ config.APIPath = "/apis"
+ config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *NetworkingV1Client) RESTClient() rest.Interface {
+ if c == nil {
+ return nil
+ }
+ return c.restClient
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/networkpolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/networkpolicy.go
new file mode 100644
index 000000000..f657e00a3
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/networking/v1/networkpolicy.go
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ "time"
+
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/networking/v1"
+ scheme "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/scheme"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+)
+
+// NetworkPoliciesGetter has a method to return a NetworkPolicyInterface.
+// A group's client should implement this interface.
+type NetworkPoliciesGetter interface {
+ NetworkPolicies(namespace string) NetworkPolicyInterface
+}
+
+// NetworkPolicyInterface has methods to work with NetworkPolicy resources.
+type NetworkPolicyInterface interface {
+ Create(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.CreateOptions) (*v1.NetworkPolicy, error)
+ Update(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (*v1.NetworkPolicy, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.NetworkPolicy, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.NetworkPolicyList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.NetworkPolicy, err error)
+ NetworkPolicyExpansion
+}
+
+// networkPolicies implements NetworkPolicyInterface
+type networkPolicies struct {
+ client rest.Interface
+ ns string
+}
+
+// newNetworkPolicies returns a NetworkPolicies
+func newNetworkPolicies(c *NetworkingV1Client, namespace string) *networkPolicies {
+ return &networkPolicies{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any.
+func (c *networkPolicies) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.NetworkPolicy, err error) {
+ result = &v1.NetworkPolicy{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("networkpolicies").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors.
+func (c *networkPolicies) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NetworkPolicyList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.NetworkPolicyList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("networkpolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested networkPolicies.
+func (c *networkPolicies) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("networkpolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any.
+func (c *networkPolicies) Create(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.CreateOptions) (result *v1.NetworkPolicy, err error) {
+ result = &v1.NetworkPolicy{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("networkpolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(networkPolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any.
+func (c *networkPolicies) Update(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (result *v1.NetworkPolicy, err error) {
+ result = &v1.NetworkPolicy{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("networkpolicies").
+ Name(networkPolicy.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(networkPolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs.
+func (c *networkPolicies) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("networkpolicies").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *networkPolicies) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("networkpolicies").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched networkPolicy.
+func (c *networkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.NetworkPolicy, err error) {
+ result = &v1.NetworkPolicy{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("networkpolicies").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/utils/listwatcher.go b/vendor/github.com/cilium/cilium/pkg/k8s/utils/listwatcher.go
new file mode 100644
index 000000000..12d71835f
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/utils/listwatcher.go
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package utils
+
+import (
+ "context"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/fields"
+ k8sRuntime "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/watch"
+ "k8s.io/client-go/tools/cache"
+)
+
+// typedListWatcher is a generic interface that all the typed k8s clients match.
+type typedListWatcher[T k8sRuntime.Object] interface {
+ List(ctx context.Context, opts metav1.ListOptions) (T, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+}
+
+// genListWatcher takes a typed list watcher and implements cache.ListWatch
+// using it.
+type genListWatcher[T k8sRuntime.Object] struct {
+ lw typedListWatcher[T]
+}
+
+func (g *genListWatcher[T]) List(opts metav1.ListOptions) (k8sRuntime.Object, error) {
+ return g.lw.List(context.Background(), opts)
+}
+
+func (g *genListWatcher[T]) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+ return g.lw.Watch(context.Background(), opts)
+}
+
+// ListerWatcherFromTyped adapts a typed k8s client to cache.ListerWatcher so it can be used
+// with an informer. With this construction we can use fake clients for testing,
+// which would not be possible if we used NewListWatchFromClient and RESTClient().
+func ListerWatcherFromTyped[T k8sRuntime.Object](lw typedListWatcher[T]) cache.ListerWatcher {
+ return &genListWatcher[T]{lw: lw}
+}
+
+type listWatcherWithModifier struct {
+ inner cache.ListerWatcher
+ optsModifier func(*metav1.ListOptions)
+}
+
+func (lw *listWatcherWithModifier) List(opts metav1.ListOptions) (k8sRuntime.Object, error) {
+ lw.optsModifier(&opts)
+ return lw.inner.List(opts)
+}
+
+func (lw *listWatcherWithModifier) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+ lw.optsModifier(&opts)
+ return lw.inner.Watch(opts)
+}
+
+func ListerWatcherWithFields(lw cache.ListerWatcher, fieldSelector fields.Selector) cache.ListerWatcher {
+ return ListerWatcherWithModifier(
+ lw,
+ func(opts *metav1.ListOptions) { opts.FieldSelector = fieldSelector.String() })
+}
+
+func ListerWatcherWithModifier(lw cache.ListerWatcher, optsModifier func(*metav1.ListOptions)) cache.ListerWatcher {
+ return &listWatcherWithModifier{
+ inner: lw,
+ optsModifier: optsModifier,
+ }
+}
+
+func ListerWatcherWithModifiers(lw cache.ListerWatcher, opts ...func(*metav1.ListOptions)) cache.ListerWatcher {
+ for _, opt := range opts {
+ lw = ListerWatcherWithModifier(lw, opt)
+ }
+ return lw
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/utils/utils.go b/vendor/github.com/cilium/cilium/pkg/k8s/utils/utils.go
new file mode 100644
index 000000000..ea778e743
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/utils/utils.go
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package utils
+
+import (
+ "net"
+ "sort"
+ "strings"
+
+ v1 "k8s.io/api/core/v1"
+ v1meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "github.com/cilium/cilium/pkg/ip"
+ k8sconst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
+ slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
+ "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels"
+ "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/selection"
+ labelsPkg "github.com/cilium/cilium/pkg/labels"
+)
+
+const (
+ // ServiceProxyNameLabel is the label for service proxy name in k8s service related
+ // objects.
+ serviceProxyNameLabel = "service.kubernetes.io/service-proxy-name"
+)
+
+type NamespaceNameGetter interface {
+ GetNamespace() string
+ GetName() string
+}
+
+// ExtractNamespace extracts the namespace of ObjectMeta.
+// For cluster scoped objects the Namespace field is empty and this function
+// assumes that the object is returned from kubernetes itself implying that
+// the namespace is empty only and only when the Object is cluster scoped
+// and thus returns empty namespace for such objects.
+func ExtractNamespace(np NamespaceNameGetter) string {
+ return np.GetNamespace()
+}
+
+// ExtractNamespaceOrDefault extracts the namespace of ObjectMeta, it returns default
+// namespace if the namespace field in the ObjectMeta is empty.
+func ExtractNamespaceOrDefault(np NamespaceNameGetter) string {
+ ns := np.GetNamespace()
+ if ns == "" {
+ return v1.NamespaceDefault
+ }
+
+ return ns
+}
+
+// GetObjNamespaceName returns the object's namespace and name.
+// If the object is cluster scoped then the function returns only the object name
+// without any namespace prefix.
+func GetObjNamespaceName(obj NamespaceNameGetter) string {
+ ns := ExtractNamespace(obj)
+ if ns == "" {
+ return obj.GetName()
+ }
+
+ return ns + "/" + obj.GetName()
+}
+
+// IngressConfiguration is the required configuration for GetServiceAndEndpointListOptionsModifier
+type IngressConfiguration interface {
+ // K8sIngressControllerEnabled returns true if ingress controller feature is enabled in Cilium
+ K8sIngressControllerEnabled() bool
+}
+
+// GatewayAPIConfiguration is the required configuration for GetServiceAndEndpointListOptionsModifier
+type GatewayAPIConfiguration interface {
+ // K8sGatewayAPIEnabled returns true if gateway API is enabled in Cilium
+ K8sGatewayAPIEnabled() bool
+}
+
+// PolicyConfiguration is the required configuration for K8s NetworkPolicy
+type PolicyConfiguration interface {
+ // K8sNetworkPolicyEnabled returns true if cilium agent needs to support K8s NetworkPolicy
+ K8sNetworkPolicyEnabled() bool
+}
+
+// GetEndpointSliceListOptionsModifier returns the options modifier for endpointSlice object list.
+// This method returns a ListOptions modifier which adds a label selector to
+// select all endpointSlice objects that do not contain the k8s headless service label.
+// This is the same behavior as kube-proxy.
+// Given label mirroring from the service objects to endpoint slice objects was introduced in Kubernetes PR 94443,
+// and released as part of Kubernetes v1.20; we can start using GetServiceAndEndpointListOptionsModifier for
+// endpoint slices when dropping support for Kubernetes v1.19 and older. We can do that since the
+// serviceProxyNameLabel label will then be mirrored to endpoint slices for services with that label.
+func GetEndpointSliceListOptionsModifier() (func(options *v1meta.ListOptions), error) {
+ nonHeadlessServiceSelector, err := labels.NewRequirement(v1.IsHeadlessService, selection.DoesNotExist, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ labelSelector := labels.NewSelector()
+ labelSelector = labelSelector.Add(*nonHeadlessServiceSelector)
+
+ return func(options *v1meta.ListOptions) {
+ options.LabelSelector = labelSelector.String()
+ }, nil
+}
+
+// GetServiceAndEndpointListOptionsModifier returns the options modifier for service and endpoint object lists.
+// This method returns a ListOptions modifier which adds a label selector to only
+// select services that are in context of Cilium.
+// Like kube-proxy Cilium does not select services/endpoints containing k8s headless service label.
+// We honor service.kubernetes.io/service-proxy-name label in the service object and only
+// handle services that match our service proxy name. If the service proxy name for Cilium
+// is an empty string, we assume that Cilium is the default service handler in which case
+// we select all services that don't have the above mentioned label.
+func GetServiceAndEndpointListOptionsModifier(k8sServiceProxy string) (func(options *v1meta.ListOptions), error) {
+ var (
+ serviceNameSelector, nonHeadlessServiceSelector *labels.Requirement
+ err error
+ )
+
+ nonHeadlessServiceSelector, err = labels.NewRequirement(v1.IsHeadlessService, selection.DoesNotExist, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ if k8sServiceProxy == "" {
+ serviceNameSelector, err = labels.NewRequirement(
+ serviceProxyNameLabel, selection.DoesNotExist, nil)
+ } else {
+ serviceNameSelector, err = labels.NewRequirement(
+ serviceProxyNameLabel, selection.DoubleEquals, []string{k8sServiceProxy})
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ labelSelector := labels.NewSelector()
+ labelSelector = labelSelector.Add(*serviceNameSelector, *nonHeadlessServiceSelector)
+
+ return func(options *v1meta.ListOptions) {
+ options.LabelSelector = labelSelector.String()
+ }, nil
+}
+
+// GetLatestPodReadiness returns the latest podReady condition on a given pod.
+func GetLatestPodReadiness(podStatus slim_corev1.PodStatus) slim_corev1.ConditionStatus {
+ for _, cond := range podStatus.Conditions {
+ if cond.Type == slim_corev1.PodReady {
+ return cond.Status
+ }
+ }
+ return slim_corev1.ConditionUnknown
+}
+
+// ValidIPs return a sorted slice of unique IP addresses retrieved from the given PodStatus.
+// Returns an error when no IPs are found.
+func ValidIPs(podStatus slim_corev1.PodStatus) []string {
+ if len(podStatus.PodIPs) == 0 && len(podStatus.PodIP) == 0 {
+ return nil
+ }
+
+ // make it a set first to avoid repeated IP addresses
+ ipsMap := make(map[string]struct{}, 1+len(podStatus.PodIPs))
+ if podStatus.PodIP != "" {
+ ipsMap[podStatus.PodIP] = struct{}{}
+ }
+ for _, podIP := range podStatus.PodIPs {
+ if podIP.IP != "" {
+ ipsMap[podIP.IP] = struct{}{}
+ }
+ }
+
+ ips := make([]string, 0, len(ipsMap))
+ for ipStr := range ipsMap {
+ ips = append(ips, ipStr)
+ }
+ sort.Strings(ips)
+ return ips
+}
+
+// IsPodRunning returns true if the pod is considered to be in running state.
+// We consider a Running pod a pod that does not report a Failed nor a Succeeded
+// pod Phase.
+func IsPodRunning(status slim_corev1.PodStatus) bool {
+ switch status.Phase {
+ case slim_corev1.PodFailed, slim_corev1.PodSucceeded:
+ return false
+ }
+ return true
+}
+
+// GetClusterIPByFamily returns a service clusterip by family.
+// From - https://github.com/kubernetes/kubernetes/blob/release-1.20/pkg/proxy/util/utils.go#L386-L411
+func GetClusterIPByFamily(ipFamily slim_corev1.IPFamily, service *slim_corev1.Service) string {
+ // allowing skew
+ if len(service.Spec.IPFamilies) == 0 {
+ if len(service.Spec.ClusterIP) == 0 || service.Spec.ClusterIP == v1.ClusterIPNone {
+ return ""
+ }
+
+ IsIPv6Family := (ipFamily == slim_corev1.IPv6Protocol)
+ if IsIPv6Family == ip.IsIPv6(net.ParseIP(service.Spec.ClusterIP)) {
+ return service.Spec.ClusterIP
+ }
+
+ return ""
+ }
+
+ for idx, family := range service.Spec.IPFamilies {
+ if family == ipFamily {
+ if idx < len(service.Spec.ClusterIPs) {
+ return service.Spec.ClusterIPs[idx]
+ }
+ }
+ }
+
+ return ""
+}
+
+// SanitizePodLabels makes sure that no important pod labels were overridden manually
+func SanitizePodLabels(labels map[string]string, namespace *slim_corev1.Namespace, serviceAccount, clusterName string) map[string]string {
+ sanitizedLabels := make(map[string]string)
+
+ for k, v := range labels {
+ sanitizedLabels[k] = v
+ }
+ // Sanitize namespace labels
+ for k, v := range namespace.GetLabels() {
+ sanitizedLabels[joinPath(k8sconst.PodNamespaceMetaLabels, k)] = v
+ }
+ // Sanitize namespace name label
+ sanitizedLabels[k8sconst.PodNamespaceLabel] = namespace.ObjectMeta.Name
+ // Sanitize service account name
+ if serviceAccount != "" {
+ sanitizedLabels[k8sconst.PolicyLabelServiceAccount] = serviceAccount
+ } else {
+ delete(sanitizedLabels, k8sconst.PolicyLabelServiceAccount)
+ }
+ // Sanitize cluster name
+ sanitizedLabels[k8sconst.PolicyLabelCluster] = clusterName
+
+ return sanitizedLabels
+}
+
+// StripPodSpecialLabels strips labels that are not supposed to be coming from a k8s pod object
+func StripPodSpecialLabels(labels map[string]string) map[string]string {
+ sanitizedLabels := make(map[string]string)
+ forbiddenKeys := map[string]struct{}{
+ k8sconst.PodNamespaceMetaLabels: {},
+ k8sconst.PolicyLabelServiceAccount: {},
+ k8sconst.PolicyLabelCluster: {},
+ k8sconst.PodNamespaceLabel: {},
+ }
+ for k, v := range labels {
+ // If the key contains the prefix for namespace labels then we will
+ // ignore it.
+ if strings.HasPrefix(k, k8sconst.PodNamespaceMetaLabels) {
+ continue
+ }
+ // If the key belongs to any of the forbiddenKeys then we will ignore
+ // it.
+ _, ok := forbiddenKeys[k]
+ if ok {
+ continue
+ }
+ sanitizedLabels[k] = v
+ }
+ return sanitizedLabels
+}
+
+// joinPath mimics JoinPath from pkg/policy/utils, which could not be imported here due to circular dependency
+func joinPath(a, b string) string {
+ return a + labelsPkg.PathDelimiter + b
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/utils/workload.go b/vendor/github.com/cilium/cilium/pkg/k8s/utils/workload.go
new file mode 100644
index 000000000..4b7d338ee
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/utils/workload.go
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Copyright Istio Authors
+// Copyright Authors of Hubble
+
+// GetWorkloadMetaFromPod and cronJobNameRegexp are copied from
+// https://github.com/istio/istio/blob/1aca7a67afd7b3e1d24fafb2fbfbeaf1e41534c0/pkg/kube/util.go
+//
+// Modifications:
+// GetDeployMetaFromPod has been renamed to GetWorkloadMetaFromPod and has
+// been updated to use the cilium slim API types.
+// We do not store the APIVersion of the owning workload in the TypeMeta
+// either, because it isn't needed for our purposes, and our slim types do not
+// have this field.
+// We fallback to the pod's ownerReference if we cannot find a more suitable
+// workload based on heuristics, whereas the original code defaulted to the
+// pod's name. This may be the case when using ReplicaSets without a Deployment.
+
+package utils
+
+import (
+ "regexp"
+ "strings"
+
+ slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
+ slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
+)
+
+var cronJobNameRegexp = regexp.MustCompile(`(.+)-\d{8,10}$`)
+
+// GetWorkloadMetaFromPod heuristically derives workload metadata from the pod spec.
+func GetWorkloadMetaFromPod(pod *slim_corev1.Pod) (slim_metav1.ObjectMeta, slim_metav1.TypeMeta, bool) {
+ if pod == nil {
+ return slim_metav1.ObjectMeta{}, slim_metav1.TypeMeta{}, false
+ }
+ // try to capture more useful namespace/name info for deployments, etc.
+ // TODO(dougreid): expand to enable lookup of OWNERs recursively a la kubernetesenv
+ workloadObjectMeta := pod.ObjectMeta
+ workloadObjectMeta.OwnerReferences = nil
+
+ var ok bool
+ var typeMetadata slim_metav1.TypeMeta
+ if len(pod.GenerateName) > 0 {
+ // if the pod name was generated (or is scheduled for generation), we can begin an investigation into the controlling reference for the pod.
+ var controllerRef slim_metav1.OwnerReference
+ controllerFound := false
+ for _, ref := range pod.OwnerReferences {
+ if ref.Controller != nil && *ref.Controller {
+ controllerRef = ref
+ controllerFound = true
+ break
+ }
+ }
+ if controllerFound {
+ ok = true
+ // default to the owner kind/name
+ typeMetadata.Kind = controllerRef.Kind
+ workloadObjectMeta.Name = controllerRef.Name
+
+ // heuristic for deployment detection
+ if typeMetadata.Kind == "ReplicaSet" && pod.Labels["pod-template-hash"] != "" && strings.HasSuffix(controllerRef.Name, pod.Labels["pod-template-hash"]) {
+ name := strings.TrimSuffix(controllerRef.Name, "-"+pod.Labels["pod-template-hash"])
+ workloadObjectMeta.Name = name
+ typeMetadata.Kind = "Deployment"
+ } else if typeMetadata.Kind == "ReplicaSet" && pod.Labels["pod-template-hash"] == "" {
+ workloadObjectMeta.Name = controllerRef.Name
+ typeMetadata.Kind = "ReplicaSet"
+ } else if typeMetadata.Kind == "ReplicationController" && pod.Labels["deploymentconfig"] != "" {
+ // If the pod is controlled by the replication controller, which is created by the DeploymentConfig resource in
+ // Openshift platform, set the deploy name to the deployment config's name, and the kind to 'DeploymentConfig'.
+ //
+ // nolint: lll
+ // For DeploymentConfig details, refer to
+ // https://docs.openshift.com/container-platform/4.1/applications/deployments/what-deployments-are.html#deployments-and-deploymentconfigs_what-deployments-are
+ //
+ // For the reference to the pod label 'deploymentconfig', refer to
+ // https://github.com/openshift/library-go/blob/7a65fdb398e28782ee1650959a5e0419121e97ae/pkg/apps/appsutil/const.go#L25
+ workloadObjectMeta.Name = pod.Labels["deploymentconfig"]
+ typeMetadata.Kind = "DeploymentConfig"
+ delete(workloadObjectMeta.Labels, "deploymentconfig")
+ } else if typeMetadata.Kind == "Job" {
+ // If job name suffixed with `-`, where the length of digit timestamp is 8~10,
+ // trim the suffix and set kind to cron job.
+ if jn := cronJobNameRegexp.FindStringSubmatch(controllerRef.Name); len(jn) == 2 {
+ workloadObjectMeta.Name = jn[1]
+ typeMetadata.Kind = "CronJob"
+ }
+ }
+ }
+ }
+
+ return workloadObjectMeta, typeMetadata, ok
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/version/version.go b/vendor/github.com/cilium/cilium/pkg/k8s/version/version.go
new file mode 100644
index 000000000..8d0e13cf8
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/version/version.go
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Package version keeps track of the Kubernetes version the client is
+// connected to
+package version
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/blang/semver/v4"
+ "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/kubernetes"
+
+ "github.com/cilium/cilium/pkg/lock"
+ "github.com/cilium/cilium/pkg/logging"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+ "github.com/cilium/cilium/pkg/versioncheck"
+)
+
+var log = logging.DefaultLogger.WithField(logfields.LogSubsys, "k8s")
+
+// ServerCapabilities is a list of server capabilities derived based on
+// version, the Kubernetes discovery API, or probing of individual API
+// endpoints.
+type ServerCapabilities struct {
+ // MinimalVersionMet is true when the minimal version of Kubernetes
+ // required to run Cilium has been met
+ MinimalVersionMet bool
+
+ // EndpointSlice is the ability of k8s server to support endpoint slices
+ EndpointSlice bool
+
+ // EndpointSliceV1 is the ability of k8s server to support endpoint slices
+ // v1. This version was introduced in K8s v1.21.0.
+ EndpointSliceV1 bool
+
+ // LeasesResourceLock is the ability of K8s server to support Lease type
+// from coordination.k8s.io/v1 API for leader election purposes (currently only in operator).
+ // https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#lease-v1-coordination-k8s-io
+ //
+ // This capability was introduced in K8s version 1.14, prior to which
+ // we don't support HA mode for the cilium-operator.
+ LeasesResourceLock bool
+}
+
+type cachedVersion struct {
+ mutex lock.RWMutex
+ capabilities ServerCapabilities
+ version semver.Version
+}
+
+const (
+ // MinimalVersionConstraint is the minimal version that Cilium supports to
+ // run kubernetes.
+ MinimalVersionConstraint = "1.16.0"
+)
+
+var (
+ cached = cachedVersion{}
+
+ discoveryAPIGroupV1beta1 = "discovery.k8s.io/v1beta1"
+ discoveryAPIGroupV1 = "discovery.k8s.io/v1"
+ coordinationV1APIGroup = "coordination.k8s.io/v1"
+ endpointSliceKind = "EndpointSlice"
+ leaseKind = "Lease"
+
+ // Constraint to check support for discovery/v1 types. Support for v1
+ // discovery was introduced in K8s version 1.21.
+ isGEThanAPIDiscoveryV1 = versioncheck.MustCompile(">=1.21.0")
+
+ // Constraint to check support for discovery/v1beta1 types. Support for
+ // v1beta1 discovery was introduced in K8s version 1.17.
+ isGEThanAPIDiscoveryV1Beta1 = versioncheck.MustCompile(">=1.17.0")
+
+ // isGEThanMinimalVersionConstraint is the minimal version required to run
+ // Cilium
+ isGEThanMinimalVersionConstraint = versioncheck.MustCompile(">=" + MinimalVersionConstraint)
+)
+
+// Version returns the version of the Kubernetes apiserver
+func Version() semver.Version {
+ cached.mutex.RLock()
+ c := cached.version
+ cached.mutex.RUnlock()
+ return c
+}
+
+// Capabilities returns the capabilities of the Kubernetes apiserver
+func Capabilities() ServerCapabilities {
+ cached.mutex.RLock()
+ c := cached.capabilities
+ cached.mutex.RUnlock()
+ return c
+}
+
+func DisableLeasesResourceLock() {
+ cached.mutex.Lock()
+ defer cached.mutex.Unlock()
+ cached.capabilities.LeasesResourceLock = false
+}
+
+func updateVersion(version semver.Version) {
+ cached.mutex.Lock()
+ defer cached.mutex.Unlock()
+
+ cached.version = version
+
+ cached.capabilities.MinimalVersionMet = isGEThanMinimalVersionConstraint(version)
+ cached.capabilities.EndpointSliceV1 = isGEThanAPIDiscoveryV1(version)
+ cached.capabilities.EndpointSlice = isGEThanAPIDiscoveryV1Beta1(version)
+}
+
+func updateServerGroupsAndResources(apiResourceLists []*metav1.APIResourceList) {
+ cached.mutex.Lock()
+ defer cached.mutex.Unlock()
+
+ cached.capabilities.EndpointSlice = false
+ cached.capabilities.EndpointSliceV1 = false
+ cached.capabilities.LeasesResourceLock = false
+ for _, rscList := range apiResourceLists {
+ if rscList.GroupVersion == discoveryAPIGroupV1beta1 {
+ for _, rsc := range rscList.APIResources {
+ if rsc.Kind == endpointSliceKind {
+ cached.capabilities.EndpointSlice = true
+ break
+ }
+ }
+ }
+ if rscList.GroupVersion == discoveryAPIGroupV1 {
+ for _, rsc := range rscList.APIResources {
+ if rsc.Kind == endpointSliceKind {
+ cached.capabilities.EndpointSlice = true
+ cached.capabilities.EndpointSliceV1 = true
+ break
+ }
+ }
+ }
+
+ if rscList.GroupVersion == coordinationV1APIGroup {
+ for _, rsc := range rscList.APIResources {
+ if rsc.Kind == leaseKind {
+ cached.capabilities.LeasesResourceLock = true
+ break
+ }
+ }
+ }
+ }
+}
+
+// Force forces the use of a specific version
+func Force(version string) error {
+ ver, err := versioncheck.Version(version)
+ if err != nil {
+ return err
+ }
+ updateVersion(ver)
+ return nil
+}
+
+func endpointSlicesFallbackDiscovery(client kubernetes.Interface) error {
+ // If a k8s version with discovery v1 is used, then do not even bother
+ // checking for v1beta1
+ cached.mutex.Lock()
+ if cached.capabilities.EndpointSliceV1 {
+ cached.capabilities.EndpointSlice = true
+ cached.mutex.Unlock()
+ return nil
+ }
+ cached.mutex.Unlock()
+
+ // Discovery of API groups requires the API services of the apiserver to be
+ // healthy. Such API services can depend on the readiness of regular pods
+ // which require Cilium to function correctly. By treating failure to
+ // discover API groups as fatal, a critial loop can be entered in which
+ // Cilium cannot start because the API groups can't be discovered.
+ //
+ // Here we acknowledge the lack of discovery ability as non Fatal and fall back to probing
+ // the API directly.
+ _, err := client.DiscoveryV1beta1().EndpointSlices("default").Get(context.TODO(), "kubernetes", metav1.GetOptions{})
+ if err == nil {
+ cached.mutex.Lock()
+ cached.capabilities.EndpointSlice = true
+ cached.mutex.Unlock()
+ return nil
+ }
+
+ if errors.IsNotFound(err) {
+ log.WithError(err).Info("Unable to retrieve EndpointSlices for default/kubernetes. Disabling EndpointSlices")
+ // StatusNotFound is a safe error, EndpointSlices are
+ // disabled and the agent can continue.
+ return nil
+ }
+
+ // Unknown error, we can't derive whether to enable or disable
+ // EndpointSlices and need to error out.
+ return fmt.Errorf("unable to validate EndpointSlices support: %s", err)
+}
+
+func leasesFallbackDiscovery(client kubernetes.Interface, apiDiscoveryEnabled bool) error {
+ // apiDiscoveryEnabled is used to fallback leases discovery to directly
+ // probing the API when we cannot discover API groups.
+ // We require to check for Leases capabilities in operator only, which uses Leases
+ // for leader election purposes in HA mode.
+ if !apiDiscoveryEnabled {
+ log.Debugf("Skipping Leases support fallback discovery")
+ return nil
+ }
+
+ // Similar to endpointSlicesFallbackDiscovery here we fallback to probing the Kubernetes
+ // API directly. `kube-controller-manager` creates a lease in the kube-system namespace
+ // and here we try and see if that Lease exists.
+ _, err := client.CoordinationV1().Leases("kube-system").Get(context.TODO(), "kube-controller-manager", metav1.GetOptions{})
+ if err == nil {
+ cached.mutex.Lock()
+ cached.capabilities.LeasesResourceLock = true
+ cached.mutex.Unlock()
+ return nil
+ }
+
+ if errors.IsNotFound(err) {
+ log.WithError(err).Info("Unable to retrieve Leases for kube-controller-manager. Disabling LeasesResourceLock")
+ // StatusNotFound is a safe error, Leases are
+ // disabled and the agent can continue
+ return nil
+ }
+
+ // Unknown error, we can't derive whether to enable or disable
+ // LeasesResourceLock and need to error out
+ return fmt.Errorf("unable to validate LeasesResourceLock support: %s", err)
+}
+
+func updateK8sServerVersion(client kubernetes.Interface) error {
+ var ver semver.Version
+
+ sv, err := client.Discovery().ServerVersion()
+ if err != nil {
+ return err
+ }
+
+ // Try GitVersion first. In case of error fallback to MajorMinor
+ if sv.GitVersion != "" {
+ // This is a string like "v1.9.0"
+ ver, err = versioncheck.Version(sv.GitVersion)
+ if err == nil {
+ updateVersion(ver)
+ return nil
+ }
+ }
+
+ if sv.Major != "" && sv.Minor != "" {
+ ver, err = versioncheck.Version(fmt.Sprintf("%s.%s", sv.Major, sv.Minor))
+ if err == nil {
+ updateVersion(ver)
+ return nil
+ }
+ }
+
+ return fmt.Errorf("cannot parse k8s server version from %+v: %s", sv, err)
+}
+
+// Update retrieves the version of the Kubernetes apiserver and derives the
+// capabilities. This function must be called after connectivity to the
+// apiserver has been established.
+//
+// Discovery of capabilities only works if the discovery API of the apiserver
+// is functional. If it is not available, a warning is logged and the discovery
+// falls back to probing individual API endpoints.
+func Update(client kubernetes.Interface, apiDiscoveryEnabled bool) error {
+ err := updateK8sServerVersion(client)
+ if err != nil {
+ return err
+ }
+
+ if apiDiscoveryEnabled {
+ // Discovery of API groups requires the API services of the
+ // apiserver to be healthy. Such API services can depend on the
+ // readiness of regular pods which require Cilium to function
+ // correctly. By treating failure to discover API groups as
+ // fatal, a critical loop can be entered in which Cilium cannot
+ // start because the API groups can't be discovered and the API
+ // groups will only become discoverable once Cilium is up.
+ _, apiResourceLists, err := client.Discovery().ServerGroupsAndResources()
+ if err != nil {
+ // It doesn't make sense to retry the retrieval of this
+ // information at a later point because the capabilities are
+ // primiarly used while the agent is starting up. Instead, fall
+ // back to probing API endpoints directly.
+ log.WithError(err).Warning("Unable to discover API groups and resources")
+ if err := endpointSlicesFallbackDiscovery(client); err != nil {
+ return err
+ }
+
+ return leasesFallbackDiscovery(client, apiDiscoveryEnabled)
+ }
+
+ updateServerGroupsAndResources(apiResourceLists)
+ } else {
+ if err := endpointSlicesFallbackDiscovery(client); err != nil {
+ return err
+ }
+
+ return leasesFallbackDiscovery(client, apiDiscoveryEnabled)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/allocator/allocator.go b/vendor/github.com/cilium/cilium/pkg/kvstore/allocator/allocator.go
new file mode 100644
index 000000000..bfc213f5a
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/allocator/allocator.go
@@ -0,0 +1,637 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package allocator
+
+import (
+ "context"
+ "fmt"
+ "path"
+ "strconv"
+ "strings"
+
+ "github.com/sirupsen/logrus"
+
+ "github.com/cilium/cilium/pkg/allocator"
+ "github.com/cilium/cilium/pkg/idpool"
+ "github.com/cilium/cilium/pkg/kvstore"
+ "github.com/cilium/cilium/pkg/logging"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+ "github.com/cilium/cilium/pkg/rate"
+)
+
+var (
+ log = logging.DefaultLogger.WithField(logfields.LogSubsys, "kvstorebackend")
+)
+
+// kvstoreBackend is an implementation of pkg/allocator.Backend. It stores
+// identities in the following format:
+//
+// Slave keys:
+//
+// Slave keys are owned by individual nodes:
+// - basePath/value/key1/node1 => 1001
+// - basePath/value/key1/node2 => 1001
+// - basePath/value/key2/node1 => 1002
+// - basePath/value/key2/node2 => 1002
+//
+// If at least one key exists with the prefix basePath/value/keyN then that
+// key must be considered to be in use in the allocation space.
+//
+// Slave keys are protected by a lease and will automatically get removed
+// after ~ option.Config.KVstoreLeaseTTL if the node does not renew in time.
+//
+// Master key:
+// - basePath/id/1001 => key1
+// - basePath/id/1002 => key2
+//
+// Master keys provide the mapping from ID to key. As long as a master key
+// for an ID exists, the ID is still in use. However, if a master key is no
+// longer backed by at least one slave key, the garbage collector will
+// eventually release the master key and return it back to the pool.
+type kvstoreBackend struct {
+ // lockless is true if allocation can be done lockless. This depends on
+ // the underlying kvstore backend
+ lockless bool
+
+ // basePrefix is the prefix in the kvstore that all keys share which
+ // are being managed by this allocator. The basePrefix typically
+ // consists of something like: "space/project/allocatorName"
+ basePrefix string
+
+ // idPrefix is the kvstore key prefix for all master keys. It is being
+ // derived from the basePrefix.
+ idPrefix string
+
+ // valuePrefix is the kvstore key prefix for all slave keys. It is
+ // being derived from the basePrefix.
+ valuePrefix string
+
+ // lockPrefix is the prefix to use for all kvstore locks. This prefix
+ // is different from the idPrefix and valuePrefix to simplify watching
+ // for ID and key changes.
+ lockPrefix string
+
+ // suffix is the suffix attached to keys which must be node specific,
+ // this is typically set to the node's IP address
+ suffix string
+
+ backend kvstore.BackendOperations
+
+ keyType allocator.AllocatorKey
+}
+
+func locklessCapability(backend kvstore.BackendOperations) bool {
+ required := kvstore.CapabilityCreateIfExists | kvstore.CapabilityDeleteOnZeroCount
+ return backend.GetCapabilities()&required == required
+}
+
+func prefixMatchesKey(prefix, key string) bool {
+ // cilium/state/identities/v1/value/label;foo;bar;/172.0.124.60
+ lastSlash := strings.LastIndex(key, "/")
+ return len(prefix) == lastSlash
+}
+
+// NewKVStoreBackend creates a pkg/allocator.Backend compatible instance. The
+// specific kvstore used is configured in pkg/kvstore.
+func NewKVStoreBackend(basePath, suffix string, typ allocator.AllocatorKey, backend kvstore.BackendOperations) (*kvstoreBackend, error) {
+ if backend == nil {
+ return nil, fmt.Errorf("kvstore client not configured")
+ }
+
+ return &kvstoreBackend{
+ basePrefix: basePath,
+ idPrefix: path.Join(basePath, "id"),
+ valuePrefix: path.Join(basePath, "value"),
+ lockPrefix: path.Join(basePath, "locks"),
+ suffix: suffix,
+ keyType: typ,
+ lockless: locklessCapability(backend),
+ backend: backend,
+ }, nil
+}
+
+// lockPath locks a key in the scope of an allocator
+func (k *kvstoreBackend) lockPath(ctx context.Context, key string) (*kvstore.Lock, error) {
+ suffix := strings.TrimPrefix(key, k.basePrefix)
+ return kvstore.LockPath(ctx, k.backend, path.Join(k.lockPrefix, suffix))
+}
+
+// DeleteAllKeys will delete all keys
+func (k *kvstoreBackend) DeleteAllKeys(ctx context.Context) {
+ k.backend.DeletePrefix(ctx, k.basePrefix)
+}
+
+// AllocateID allocates a key->ID mapping in the kvstore.
+func (k *kvstoreBackend) AllocateID(ctx context.Context, id idpool.ID, key allocator.AllocatorKey) error {
+ // create /id/ and fail if it already exists
+ keyPath := path.Join(k.idPrefix, id.String())
+ keyEncoded := []byte(k.backend.Encode([]byte(key.GetKey())))
+ success, err := k.backend.CreateOnly(ctx, keyPath, keyEncoded, false)
+ if err != nil || !success {
+ return fmt.Errorf("unable to create master key '%s': %s", keyPath, err)
+ }
+
+ return nil
+}
+
+// AllocateID allocates a key->ID mapping in the kvstore.
+func (k *kvstoreBackend) AllocateIDIfLocked(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, lock kvstore.KVLocker) error {
+ // create /id/ and fail if it already exists
+ keyPath := path.Join(k.idPrefix, id.String())
+ keyEncoded := []byte(k.backend.Encode([]byte(key.GetKey())))
+ success, err := k.backend.CreateOnlyIfLocked(ctx, keyPath, keyEncoded, false, lock)
+ if err != nil || !success {
+ return fmt.Errorf("unable to create master key '%s': %s", keyPath, err)
+ }
+
+ return nil
+}
+
+// AcquireReference marks that this node is using this key->ID mapping in the kvstore.
+func (k *kvstoreBackend) AcquireReference(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, lock kvstore.KVLocker) error {
+ keyString := k.backend.Encode([]byte(key.GetKey()))
+ if err := k.createValueNodeKey(ctx, keyString, id, lock); err != nil {
+ return fmt.Errorf("unable to create slave key '%s': %s", keyString, err)
+ }
+ return nil
+}
+
+// createValueKey records that this "node" is using this key->ID
+func (k *kvstoreBackend) createValueNodeKey(ctx context.Context, key string, newID idpool.ID, lock kvstore.KVLocker) error {
+ // add a new key /value// to account for the reference
+ // The key is protected with a TTL/lease and will expire after LeaseTTL
+ valueKey := path.Join(k.valuePrefix, key, k.suffix)
+ if _, err := k.backend.UpdateIfDifferentIfLocked(ctx, valueKey, []byte(newID.String()), true, lock); err != nil {
+ return fmt.Errorf("unable to create value-node key '%s': %s", valueKey, err)
+ }
+
+ return nil
+}
+
+// Lock locks a key in the scope of an allocator
+func (k *kvstoreBackend) lock(ctx context.Context, key string) (*kvstore.Lock, error) {
+ suffix := strings.TrimPrefix(key, k.basePrefix)
+ return kvstore.LockPath(ctx, k.backend, path.Join(k.lockPrefix, suffix))
+}
+
+// Lock locks a key in the scope of an allocator
+func (k *kvstoreBackend) Lock(ctx context.Context, key allocator.AllocatorKey) (kvstore.KVLocker, error) {
+ return k.lock(ctx, k.backend.Encode([]byte(key.GetKey())))
+}
+
+// Get returns the ID which is allocated to a key in the kvstore
+func (k *kvstoreBackend) Get(ctx context.Context, key allocator.AllocatorKey) (idpool.ID, error) {
+ // ListPrefix() will return all keys matching the prefix, the prefix
+ // can cover multiple different keys, example:
+ //
+ // key1 := label1;label2;
+ // key2 := label1;label2;label3;
+ //
+ // In order to retrieve the correct key, the position of the last '/'
+ // is significant, e.g.
+ //
+ // prefix := cilium/state/identities/v1/value/label;foo;
+ //
+ // key1 := cilium/state/identities/v1/value/label;foo;/172.0.124.60
+ // key2 := cilium/state/identities/v1/value/label;foo;bar;/172.0.124.60
+ //
+ // Only key1 should match
+ prefix := path.Join(k.valuePrefix, k.backend.Encode([]byte(key.GetKey())))
+ pairs, err := k.backend.ListPrefix(ctx, prefix)
+ kvstore.Trace("ListPrefix", err, logrus.Fields{fieldPrefix: prefix, "entries": len(pairs)})
+ if err != nil {
+ return 0, err
+ }
+
+ for k, v := range pairs {
+ if prefixMatchesKey(prefix, k) {
+ id, err := strconv.ParseUint(string(v.Data), 10, 64)
+ if err == nil {
+ return idpool.ID(id), nil
+ }
+ }
+ }
+
+ return idpool.NoID, nil
+}
+
+// GetIfLocked returns the ID which is allocated to a key in the kvstore
+// if the client is still holding the given lock.
+func (k *kvstoreBackend) GetIfLocked(ctx context.Context, key allocator.AllocatorKey, lock kvstore.KVLocker) (idpool.ID, error) {
+ // ListPrefixIfLocked() will return all keys matching the prefix, the prefix
+ // can cover multiple different keys, example:
+ //
+ // key1 := label1;label2;
+ // key2 := label1;label2;label3;
+ //
+ // In order to retrieve the correct key, the position of the last '/'
+ // is significant, e.g.
+ //
+ // prefix := cilium/state/identities/v1/value/label;foo;
+ //
+ // key1 := cilium/state/identities/v1/value/label;foo;/172.0.124.60
+ // key2 := cilium/state/identities/v1/value/label;foo;bar;/172.0.124.60
+ //
+ // Only key1 should match
+ prefix := path.Join(k.valuePrefix, k.backend.Encode([]byte(key.GetKey())))
+ pairs, err := k.backend.ListPrefixIfLocked(ctx, prefix, lock)
+ kvstore.Trace("ListPrefixLocked", err, logrus.Fields{fieldPrefix: prefix, "entries": len(pairs)})
+ if err != nil {
+ return 0, err
+ }
+
+ for k, v := range pairs {
+ if prefixMatchesKey(prefix, k) {
+ id, err := strconv.ParseUint(string(v.Data), 10, 64)
+ if err == nil {
+ return idpool.ID(id), nil
+ }
+ }
+ }
+
+ return idpool.NoID, nil
+}
+
+// GetByID returns the key associated with an ID. Returns nil if no key is
+// associated with the ID.
+func (k *kvstoreBackend) GetByID(ctx context.Context, id idpool.ID) (allocator.AllocatorKey, error) {
+ v, err := k.backend.Get(ctx, path.Join(k.idPrefix, id.String()))
+ if err != nil {
+ return nil, err
+ }
+
+ if v == nil {
+ return nil, nil
+ }
+
+ s, err := k.backend.Decode(string(v))
+ if err != nil {
+ return nil, err
+ }
+
+ return k.keyType.PutKey(string(s)), nil
+}
+
+// UpdateKey refreshes the record that this node is using this key -> id
+// mapping. When reliablyMissing is set it will also recreate missing master or
+// slave keys.
+func (k *kvstoreBackend) UpdateKey(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, reliablyMissing bool) error {
+ var (
+ err error
+ recreated bool
+ keyPath = path.Join(k.idPrefix, id.String())
+ keyEncoded = []byte(k.backend.Encode([]byte(key.GetKey())))
+ valueKey = path.Join(k.valuePrefix, k.backend.Encode([]byte(key.GetKey())), k.suffix)
+ )
+
+ // Use of CreateOnly() ensures that any existing potentially
+ // conflicting key is never overwritten.
+ success, err := k.backend.CreateOnly(ctx, keyPath, keyEncoded, false)
+ switch {
+ case err != nil:
+ return fmt.Errorf("Unable to re-create missing master key \"%s\" -> \"%s\": %s", fieldKey, valueKey, err)
+ case success:
+ log.WithField(fieldKey, keyPath).Warning("Re-created missing master key")
+ }
+
+ // Also re-create the slave key in case it has been deleted. This will
+ // ensure that the next garbage collection cycle of any participating
+ // node does not remove the master key again.
+ if reliablyMissing {
+ recreated, err = k.backend.CreateOnly(ctx, valueKey, []byte(id.String()), true)
+ } else {
+ recreated, err = k.backend.UpdateIfDifferent(ctx, valueKey, []byte(id.String()), true)
+ }
+ switch {
+ case err != nil:
+ return fmt.Errorf("Unable to re-create missing slave key \"%s\" -> \"%s\": %s", fieldKey, valueKey, err)
+ case recreated:
+ log.WithField(fieldKey, valueKey).Warning("Re-created missing slave key")
+ }
+
+ return nil
+}
+
+// UpdateKeyIfLocked refreshes the record that this node is using this key -> id
+// mapping. When reliablyMissing is set it will also recreate missing master or
+// slave keys.
+func (k *kvstoreBackend) UpdateKeyIfLocked(ctx context.Context, id idpool.ID, key allocator.AllocatorKey, reliablyMissing bool, lock kvstore.KVLocker) error {
+ var (
+ err error
+ recreated bool
+ keyPath = path.Join(k.idPrefix, id.String())
+ keyEncoded = []byte(k.backend.Encode([]byte(key.GetKey())))
+ valueKey = path.Join(k.valuePrefix, k.backend.Encode([]byte(key.GetKey())), k.suffix)
+ )
+
+ // Use of CreateOnly() ensures that any existing potentially
+ // conflicting key is never overwritten.
+ success, err := k.backend.CreateOnlyIfLocked(ctx, keyPath, keyEncoded, false, lock)
+ switch {
+ case err != nil:
+ return fmt.Errorf("Unable to re-create missing master key \"%s\" -> \"%s\": %s", fieldKey, valueKey, err)
+ case success:
+ log.WithField(fieldKey, keyPath).Warning("Re-created missing master key")
+ }
+
+ // Also re-create the slave key in case it has been deleted. This will
+ // ensure that the next garbage collection cycle of any participating
+ // node does not remove the master key again.
+ // lock is ignored since the key doesn't exist.
+ if reliablyMissing {
+ recreated, err = k.backend.CreateOnly(ctx, valueKey, []byte(id.String()), true)
+ } else {
+ recreated, err = k.backend.UpdateIfDifferentIfLocked(ctx, valueKey, []byte(id.String()), true, lock)
+ }
+ switch {
+ case err != nil:
+ return fmt.Errorf("Unable to re-create missing slave key \"%s\" -> \"%s\": %s", fieldKey, valueKey, err)
+ case recreated:
+ log.WithField(fieldKey, valueKey).Warning("Re-created missing slave key")
+ }
+
+ return nil
+}
+
+// Release releases the use of an ID associated with the provided key. It does
+// not guard against concurrent releases. This is currently guarded by
+// Allocator.slaveKeysMutex when called from pkg/allocator.Allocator.Release.
+func (k *kvstoreBackend) Release(ctx context.Context, _ idpool.ID, key allocator.AllocatorKey) (err error) {
+ valueKey := path.Join(k.valuePrefix, k.backend.Encode([]byte(key.GetKey())), k.suffix)
+ log.WithField(fieldKey, key).Info("Released last local use of key, invoking global release")
+
+ // does not need to be deleted with a lock as its protected by the
+ // Allocator.slaveKeysMutex
+ if err := k.backend.Delete(ctx, valueKey); err != nil {
+ log.WithError(err).WithFields(logrus.Fields{fieldKey: key}).Warning("Ignoring node specific ID")
+ return err
+ }
+
+ // if k.lockless {
+ // FIXME: etcd 3.3 will make it possible to do a lockless
+ // cleanup of the ID and release it right away. For now we rely
+ // on the GC to kick in a release unused IDs.
+ // }
+
+ return nil
+}
+
+// RunLocksGC scans the kvstore for unused locks and removes them. Returns
+// a map of locks that are currently being held, including the ones that have
+// failed to be GCed.
+func (k *kvstoreBackend) RunLocksGC(ctx context.Context, staleKeysPrevRound map[string]kvstore.Value) (map[string]kvstore.Value, error) {
+ // fetch list of all /../locks keys
+ allocated, err := k.backend.ListPrefix(ctx, k.lockPrefix)
+ if err != nil {
+ return nil, fmt.Errorf("list failed: %s", err)
+ }
+
+ staleKeys := map[string]kvstore.Value{}
+
+ // iterate over /../locks
+ for key, v := range allocated {
+ scopedLog := log.WithFields(logrus.Fields{
+ fieldKey: key,
+ fieldLeaseID: strconv.FormatUint(uint64(v.LeaseID), 16),
+ })
+ // Only delete if this key was previously marked as to be deleted
+ if modRev, ok := staleKeysPrevRound[key]; ok &&
+ // comparing ModRevision ensures the same client is still holding
+ // this lock since the last GC was called.
+ modRev.ModRevision == v.ModRevision &&
+ modRev.LeaseID == v.LeaseID &&
+ modRev.SessionID == v.SessionID {
+ if err := k.backend.Delete(ctx, key); err == nil {
+ scopedLog.Warning("Forcefully removed distributed lock due to client staleness." +
+ " Please check the connectivity between the KVStore and the client with that lease ID.")
+ continue
+ }
+ scopedLog.WithError(err).
+ Warning("Unable to remove distributed lock due to client staleness." +
+ " Please check the connectivity between the KVStore and the client with that lease ID.")
+ }
+ // If the key was not found mark it to be deleted in the next RunGC
+ staleKeys[key] = kvstore.Value{
+ ModRevision: v.ModRevision,
+ LeaseID: v.LeaseID,
+ SessionID: v.SessionID,
+ }
+ }
+
+ return staleKeys, nil
+}
+
+// RunGC scans the kvstore for unused master keys and removes them
+func (k *kvstoreBackend) RunGC(
+ ctx context.Context,
+ rateLimit *rate.Limiter,
+ staleKeysPrevRound map[string]uint64,
+ minID, maxID idpool.ID,
+) (map[string]uint64, *allocator.GCStats, error) {
+
+ // fetch list of all /id/ keys
+ allocated, err := k.backend.ListPrefix(ctx, k.idPrefix)
+ if err != nil {
+ return nil, nil, fmt.Errorf("list failed: %s", err)
+ }
+
+ totalEntries := len(allocated)
+ deletedEntries := 0
+
+ staleKeys := map[string]uint64{}
+
+ min := uint64(minID)
+ max := uint64(maxID)
+ reasonOutOfRange := "out of local cluster identity range [" + strconv.FormatUint(min, 10) + "," + strconv.FormatUint(max, 10) + "]"
+
+ // iterate over /id/
+ for key, v := range allocated {
+ // if k.lockless {
+ // FIXME: Add DeleteOnZeroCount support
+ // }
+
+ // Parse identity ID
+ items := strings.Split(key, "/")
+ if len(items) == 0 {
+ log.WithField(fieldKey, key).WithError(err).Warning("Unknown identity key found, skipping")
+ continue
+ }
+
+ if identityID, err := strconv.ParseUint(items[len(items)-1], 10, 64); err != nil {
+ log.WithField(fieldKey, key).WithError(err).Warning("Parse identity failed, skipping")
+ continue
+ } else {
+ // We should not GC those identities that are out of our scope
+ if identityID < min || identityID > max {
+ log.WithFields(logrus.Fields{
+ fieldKey: key,
+ "reason": reasonOutOfRange,
+ }).Debug("Skipping this key")
+ continue
+ }
+ }
+
+ lock, err := k.lockPath(ctx, key)
+ if err != nil {
+ log.WithError(err).WithField(fieldKey, key).Warning("allocator garbage collector was unable to lock key")
+ continue
+ }
+
+ // fetch list of all /value/ keys
+ valueKeyPrefix := path.Join(k.valuePrefix, string(v.Data))
+ pairs, err := k.backend.ListPrefixIfLocked(ctx, valueKeyPrefix, lock)
+ if err != nil {
+ log.WithError(err).WithField(fieldPrefix, valueKeyPrefix).Warning("allocator garbage collector was unable to list keys")
+ lock.Unlock(context.Background())
+ continue
+ }
+
+ hasUsers := false
+ for prefix := range pairs {
+ if prefixMatchesKey(valueKeyPrefix, prefix) {
+ hasUsers = true
+ break
+ }
+ }
+
+ var deleted bool
+ // if ID has no user, delete it
+ if !hasUsers {
+ scopedLog := log.WithFields(logrus.Fields{
+ fieldKey: key,
+ fieldID: path.Base(key),
+ })
+ // Only delete if this key was previously marked as to be deleted
+ if modRev, ok := staleKeysPrevRound[key]; ok {
+ // if the v.ModRevision is different than the modRev (which is
+ // the last seen v.ModRevision) then this key was re-used in
+ // between GC calls. We should not mark it as stale keys yet,
+ // but the next GC call will do it.
+ if modRev == v.ModRevision {
+ if err := k.backend.DeleteIfLocked(ctx, key, lock); err != nil {
+ scopedLog.WithError(err).Warning("Unable to delete unused allocator master key")
+ } else {
+ deletedEntries++
+ scopedLog.Info("Deleted unused allocator master key")
+ }
+ // consider the key regardless if there was an error from
+ // the kvstore. We want to rate limit the number of requests
+ // done to the KVStore.
+ deleted = true
+ }
+ } else {
+ // If the key was not found mark it to be deleted in the next RunGC
+ staleKeys[key] = v.ModRevision
+ }
+ }
+
+ lock.Unlock(context.Background())
+ if deleted {
+ // Wait after deleting the key. This is not ideal because we have
+ // done the operation that should be rate limited before checking the
+ // rate limit. We have to do this here to avoid holding the global lock
+ // for a long period of time.
+ err = rateLimit.Wait(ctx)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+ }
+
+ gcStats := &allocator.GCStats{
+ Alive: totalEntries - deletedEntries,
+ Deleted: deletedEntries,
+ }
+ return staleKeys, gcStats, nil
+}
+
+func (k *kvstoreBackend) keyToID(key string) (id idpool.ID, err error) {
+ if !strings.HasPrefix(key, k.idPrefix) {
+ return idpool.NoID, fmt.Errorf("Found invalid key \"%s\" outside of prefix \"%s\"", key, k.idPrefix)
+ }
+
+ suffix := strings.TrimPrefix(key, k.idPrefix)
+ if suffix[0] == '/' {
+ suffix = suffix[1:]
+ }
+
+ idParsed, err := strconv.ParseUint(suffix, 10, 64)
+ if err != nil {
+ return idpool.NoID, fmt.Errorf("Cannot parse key suffix \"%s\"", suffix)
+ }
+
+ return idpool.ID(idParsed), nil
+}
+
+func (k *kvstoreBackend) ListAndWatch(ctx context.Context, handler allocator.CacheMutations, stopChan chan struct{}) {
+ watcher := k.backend.ListAndWatch(ctx, k.idPrefix, k.idPrefix, 512)
+
+ for {
+ select {
+ case event, ok := <-watcher.Events:
+ if !ok {
+ goto abort
+ }
+ if event.Typ == kvstore.EventTypeListDone {
+ handler.OnListDone()
+ continue
+ }
+
+ id, err := k.keyToID(event.Key)
+ switch {
+ case err != nil:
+ log.WithError(err).WithField(fieldKey, event.Key).Warning("Invalid key")
+
+ case id != idpool.NoID:
+ var key allocator.AllocatorKey
+
+ if len(event.Value) > 0 {
+ s, err := k.backend.Decode(string(event.Value))
+ if err != nil {
+ log.WithError(err).WithFields(logrus.Fields{
+ fieldKey: event.Key,
+ fieldValue: event.Value,
+ }).Warning("Unable to decode key value")
+ continue
+ }
+
+ key = k.keyType.PutKey(string(s))
+ } else {
+ if event.Typ != kvstore.EventTypeDelete {
+ log.WithFields(logrus.Fields{
+ fieldKey: event.Key,
+ fieldEventType: event.Typ,
+ }).Error("Received a key with an empty value")
+ continue
+ }
+ }
+
+ switch event.Typ {
+ case kvstore.EventTypeCreate:
+ handler.OnAdd(id, key)
+
+ case kvstore.EventTypeModify:
+ handler.OnModify(id, key)
+
+ case kvstore.EventTypeDelete:
+ handler.OnDelete(id, key)
+ }
+ }
+
+ case <-stopChan:
+ goto abort
+ }
+ }
+
+abort:
+ watcher.Stop()
+}
+
+func (k *kvstoreBackend) Status() (string, error) {
+ return k.backend.Status()
+}
+
+func (k *kvstoreBackend) Encode(v string) string {
+ return k.backend.Encode([]byte(v))
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/allocator/doc.go b/vendor/github.com/cilium/cilium/pkg/kvstore/allocator/doc.go
new file mode 100644
index 000000000..26c9a9aad
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/allocator/doc.go
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Package allocator provides a kvstore based ID allocator
+package allocator
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/allocator/logfields.go b/vendor/github.com/cilium/cilium/pkg/kvstore/allocator/logfields.go
new file mode 100644
index 000000000..e6f033029
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/allocator/logfields.go
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package allocator
+
+const (
+ fieldID = "id"
+ fieldKey = "key"
+ fieldPrefix = "prefix"
+ fieldValue = "value"
+ fieldLeaseID = "leaseID"
+ fieldEventType = "eventType"
+)
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/backend.go b/vendor/github.com/cilium/cilium/pkg/kvstore/backend.go
new file mode 100644
index 000000000..37616ef71
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/backend.go
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package kvstore
+
+import (
+ "context"
+ "time"
+
+ "google.golang.org/grpc"
+)
+
+type backendOption struct {
+ // description is the description of the option
+ description string
+
+ // value is the value the option has been configured to
+ value string
+
+ // validate, if set, is called to validate the value before assignment
+ validate func(value string) error
+}
+
+type backendOptions map[string]*backendOption
+
+type ClusterSizeDependantIntervalFunc func(baseInterval time.Duration) time.Duration
+
+// ExtraOptions represents any options that can not be represented in a textual
+// format and need to be set programmatically.
+type ExtraOptions struct {
+ DialOption []grpc.DialOption
+
+ // ClusterSizeDependantInterval defines the function to calculate
+ // intervals based on cluster size
+ ClusterSizeDependantInterval ClusterSizeDependantIntervalFunc
+
+ // NoLockQuorumCheck disables the lock acquisition quorum check
+ NoLockQuorumCheck bool
+
+ // ClusterName is the name of each etcd cluster
+ ClusterName string
+}
+
+// StatusCheckInterval returns the interval of status checks depending on the
+// cluster size and the current connectivity state
+//
+// nodes OK Failing
+// 1 20s 3s
+// 4 45s 7s
+// 8 1m05s 11s
+// 32 1m45s 18s
+// 128 2m25s 24s
+// 512 3m07s 32s
+// 2048 3m46s 38s
+// 8192 4m30s 45s
+func (e *ExtraOptions) StatusCheckInterval(allConnected bool) time.Duration {
+ interval := 30 * time.Second
+
+ // Reduce the interval while connectivity issues are being detected
+ if !allConnected {
+ interval = 5 * time.Second
+ }
+
+ if e != nil && e.ClusterSizeDependantInterval != nil {
+ interval = e.ClusterSizeDependantInterval(interval)
+ }
+ return interval
+}
+
+// backendModule is the interface that each kvstore backend has to implement.
+type backendModule interface {
+ // getName must return the name of the backend
+ getName() string
+
+ // setConfig must configure the backend with the specified options.
+ // This function is called once before newClient().
+ setConfig(opts map[string]string) error
+
+ // setExtraConfig sets more options in the kvstore that are not able to
+ // be set by strings.
+ setExtraConfig(opts *ExtraOptions) error
+
+ // setConfigDummy must configure the backend with dummy configuration
+ // for testing purposes. This is a replacement for setConfig().
+ setConfigDummy()
+
+ // getConfig must return the backend configuration.
+ getConfig() map[string]string
+
+ // newClient must initialize the backend and create a new kvstore
+ // client which implements the BackendOperations interface
+ newClient(ctx context.Context, opts *ExtraOptions) (BackendOperations, chan error)
+
+ // createInstance creates a new instance of the module
+ createInstance() backendModule
+}
+
+var (
+ // registeredBackends is a slice of all backends that have registered
+ // itself via registerBackend()
+ registeredBackends = map[string]backendModule{}
+)
+
+// registerBackend must be called by kvstore backends to register themselves
+func registerBackend(name string, module backendModule) {
+ if _, ok := registeredBackends[name]; ok {
+ log.Panicf("backend with name '%s' already registered", name)
+ }
+
+ registeredBackends[name] = module
+}
+
+// getBackend finds a registered backend by name
+func getBackend(name string) backendModule {
+ if backend, ok := registeredBackends[name]; ok {
+ return backend.createInstance()
+ }
+
+ return nil
+}
+
+// BackendOperations are the individual kvstore operations that each backend
+// must implement. Direct use of this interface is possible but will bypass the
+// tracing layer.
+type BackendOperations interface {
+ // Connected returns a channel which is closed whenever the kvstore client
+ // is connected to the kvstore server.
+ Connected(ctx context.Context) <-chan error
+
+ // Disconnected returns a channel which is closed whenever the kvstore
+ // client is not connected to the kvstore server. (Only implemented for etcd)
+ Disconnected() <-chan struct{}
+
+ // Status returns the status of the kvstore client including an
+ // eventual error
+ Status() (string, error)
+
+ // StatusCheckErrors returns a channel which receives status check
+ // errors
+ StatusCheckErrors() <-chan error
+
+ // LockPath locks the provided path
+ LockPath(ctx context.Context, path string) (KVLocker, error)
+
+ // Get returns value of key
+ Get(ctx context.Context, key string) ([]byte, error)
+
+ // GetIfLocked returns value of key if the client is still holding the given lock.
+ GetIfLocked(ctx context.Context, key string, lock KVLocker) ([]byte, error)
+
+ // GetPrefix returns the first key which matches the prefix and its value
+ GetPrefix(ctx context.Context, prefix string) (string, []byte, error)
+
+ // GetPrefixIfLocked returns the first key which matches the prefix and its value if the client is still holding the given lock.
+ GetPrefixIfLocked(ctx context.Context, prefix string, lock KVLocker) (string, []byte, error)
+
+ // Set sets value of key
+ Set(ctx context.Context, key string, value []byte) error
+
+ // Delete deletes a key. It does not return an error if the key does not exist.
+ Delete(ctx context.Context, key string) error
+
+ // DeleteIfLocked deletes a key if the client is still holding the given lock. It does not return an error if the key does not exist.
+ DeleteIfLocked(ctx context.Context, key string, lock KVLocker) error
+
+ DeletePrefix(ctx context.Context, path string) error
+
+ // Update creates or updates a key.
+ Update(ctx context.Context, key string, value []byte, lease bool) error
+
+ // UpdateIfLocked updates a key if the client is still holding the given lock.
+ UpdateIfLocked(ctx context.Context, key string, value []byte, lease bool, lock KVLocker) error
+
+ // UpdateIfDifferent updates a key if the value is different
+ UpdateIfDifferent(ctx context.Context, key string, value []byte, lease bool) (bool, error)
+
+ // UpdateIfDifferentIfLocked updates a key if the value is different and if the client is still holding the given lock.
+ UpdateIfDifferentIfLocked(ctx context.Context, key string, value []byte, lease bool, lock KVLocker) (bool, error)
+
+ // CreateOnly atomically creates a key or fails if it already exists
+ CreateOnly(ctx context.Context, key string, value []byte, lease bool) (bool, error)
+
+ // CreateOnlyIfLocked atomically creates a key if the client is still holding the given lock or fails if it already exists
+ CreateOnlyIfLocked(ctx context.Context, key string, value []byte, lease bool, lock KVLocker) (bool, error)
+
+ // CreateIfExists creates a key with the value only if key condKey exists
+ CreateIfExists(ctx context.Context, condKey, key string, value []byte, lease bool) error
+
+ // ListPrefix returns a list of keys matching the prefix
+ ListPrefix(ctx context.Context, prefix string) (KeyValuePairs, error)
+
+ // ListPrefixIfLocked returns a list of keys matching the prefix only if the client is still holding the given lock.
+ ListPrefixIfLocked(ctx context.Context, prefix string, lock KVLocker) (KeyValuePairs, error)
+
+ // Watch starts watching for changes in a prefix. If list is true, the
+ // current keys matching the prefix will be listed and reported as new
+ // keys first.
+ Watch(ctx context.Context, w *Watcher)
+
+ // Close closes the kvstore client
+ Close(ctx context.Context)
+
+ // GetCapabilities returns the capabilities of the backend
+ GetCapabilities() Capabilities
+
+ // Encodes a binary slice into a character set that the backend
+ // supports
+ Encode(in []byte) string
+
+ // Decodes a key previously encoded back into the original binary slice
+ Decode(in string) ([]byte, error)
+
+ // ListAndWatch creates a new watcher which will watch the specified
+ // prefix for changes. Before doing this, it will list the current keys
+ // matching the prefix and report them as new keys. Name can be set to
+ // anything and is used for logging messages. The Events channel is
+ // created with the specified size. Upon every change observed, a
+ // KeyValueEvent will be sent to the Events channel
+ ListAndWatch(ctx context.Context, name, prefix string, chanSize int) *Watcher
+
+ // RegisterLeaseExpiredObserver registers a function which is executed when
+ // the lease associated with a key having the given prefix is detected as expired.
+ // If the function is nil, the previous observer (if any) is unregistered.
+ RegisterLeaseExpiredObserver(prefix string, fn func(key string))
+
+ BackendOperationsUserMgmt
+}
+
+// BackendOperationsUserMgmt are the kvstore operations for users management.
+type BackendOperationsUserMgmt interface {
+ // UserEnforcePresence creates a user in the kvstore if not already present, and grants the specified roles.
+ UserEnforcePresence(ctx context.Context, name string, roles []string) error
+
+ // UserEnforceAbsence deletes a user from the kvstore, if present.
+ UserEnforceAbsence(ctx context.Context, name string) error
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/backwards_compat.go b/vendor/github.com/cilium/cilium/pkg/kvstore/backwards_compat.go
new file mode 100644
index 000000000..a5417c5b1
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/backwards_compat.go
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package kvstore
+
+import "context"
+
+const (
+ // OperationalPath is the base path to store the operational details in the kvstore.
+ OperationalPath = "cilium-net/operational"
+
+ // servicePathV1 is the base path for the services stored in the kvstore.
+ servicePathV1 = OperationalPath + "/Services/"
+)
+
+// deleteLegacyPrefixes removes old kvstore prefixes of non-persistent keys
+// which have been used in the past but have been obsoleted since. We remove
+// them on agent start to ensure that as users upgrade, we do not leave behind
+// stale keys
+//
+// Rules:
+//
+ // - For non-persistent state, obsoleted prefixes can be deleted as soon as the
+// prefix has been declared obsolete
+// - For persistent configuration stored in the kvstore, a forward upgrade
+// path must be created which automatically removes the old keys on successful
+// translation.
+func deleteLegacyPrefixes(ctx context.Context) {
+ // Delete all keys in old services prefix
+ Client().DeletePrefix(ctx, servicePathV1)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/cell.go b/vendor/github.com/cilium/cilium/pkg/kvstore/cell.go
new file mode 100644
index 000000000..55116aff0
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/cell.go
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package kvstore
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/spf13/pflag"
+
+ "github.com/cilium/cilium/pkg/defaults"
+ "github.com/cilium/cilium/pkg/hive"
+ "github.com/cilium/cilium/pkg/hive/cell"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+ "github.com/cilium/cilium/pkg/option"
+ "github.com/cilium/cilium/pkg/promise"
+)
+
+// Cell returns a cell which provides a promise for the global kvstore client.
+// The parameter allows to customize the default backend, which can be either
+// set to a specific value (e.g., in the case of clustermesh-apiserver) or
+// left unset.
+var Cell = func(defaultBackend string) cell.Cell {
+ return cell.Module(
+ "kvstore-client",
+ "KVStore Client",
+
+ cell.Config(config{
+ KVStore: defaultBackend,
+ KVStoreConnectivityTimeout: defaults.KVstoreConnectivityTimeout,
+ KVStoreLeaseTTL: defaults.KVstoreLeaseTTL,
+ KVStorePeriodicSync: defaults.KVstorePeriodicSync,
+ }),
+
+ cell.Provide(func(lc hive.Lifecycle, shutdowner hive.Shutdowner, cfg config, opts *ExtraOptions) promise.Promise[BackendOperations] {
+ resolver, promise := promise.New[BackendOperations]()
+ if cfg.KVStore == "" {
+ log.Info("Skipping connection to kvstore, as not configured")
+ resolver.Reject(errors.New("kvstore not configured"))
+ return promise
+ }
+
+ // Propagate the options to the global variables for backward compatibility
+ option.Config.KVStore = cfg.KVStore
+ option.Config.KVStoreOpt = cfg.KVStoreOpt
+ option.Config.KVstoreConnectivityTimeout = cfg.KVStoreConnectivityTimeout
+ option.Config.KVstoreLeaseTTL = cfg.KVStoreLeaseTTL
+ option.Config.KVstorePeriodicSync = cfg.KVStorePeriodicSync
+
+ ctx, cancel := context.WithCancel(context.Background())
+ var wg sync.WaitGroup
+
+ lc.Append(hive.Hook{
+ OnStart: func(hive.HookContext) error {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ log := log.WithField(logfields.BackendName, cfg.KVStore)
+ log.Info("Establishing connection to kvstore")
+ backend, errCh := NewClient(ctx, cfg.KVStore, cfg.KVStoreOpt, opts)
+
+ if err, isErr := <-errCh; isErr {
+ log.WithError(err).Error("Failed to establish connection to kvstore")
+ resolver.Reject(fmt.Errorf("failed connecting to kvstore: %w", err))
+ shutdowner.Shutdown(hive.ShutdownWithError(err))
+ return
+ }
+
+ log.Info("Connection to kvstore successfully established")
+ resolver.Resolve(backend)
+ }()
+ return nil
+ },
+ OnStop: func(hive.HookContext) error {
+ cancel()
+ wg.Wait()
+
+ // We don't explicitly close the backend here, because that would
+ // attempt to revoke the lease, causing all entries associated
+ // with that lease to be deleted. This would not be the
+ // behavior expected by the consumers of this cell.
+ return nil
+ },
+ })
+
+ return promise
+ }),
+ )
+}
+
+type config struct {
+ KVStore string
+ KVStoreOpt map[string]string
+ KVStoreConnectivityTimeout time.Duration
+ KVStoreLeaseTTL time.Duration
+ KVStorePeriodicSync time.Duration
+}
+
+func (def config) Flags(flags *pflag.FlagSet) {
+ flags.String(option.KVStore, def.KVStore, "Key-value store type")
+
+ flags.StringToString(option.KVStoreOpt, def.KVStoreOpt,
+ "Key-value store options e.g. etcd.address=127.0.0.1:4001")
+
+ flags.Duration(option.KVstoreConnectivityTimeout, def.KVStoreConnectivityTimeout,
+ "Time after which an incomplete kvstore operation is considered failed")
+
+ flags.Duration(option.KVstoreLeaseTTL, def.KVStoreLeaseTTL,
+ "Time-to-live for the KVstore lease.")
+ flags.MarkHidden(option.KVstoreLeaseTTL)
+
+ flags.Duration(option.KVstorePeriodicSync, def.KVStorePeriodicSync,
+ "Periodic KVstore synchronization interval")
+}
+
+// GlobalUserMgmtClientPromiseCell provides a promise returning the global kvstore client to perform users
+// management operations, once it has been initialized.
+var GlobalUserMgmtClientPromiseCell = cell.Module(
+ "global-kvstore-users-client",
+ "Global KVStore Users Management Client Promise",
+
+ cell.Provide(func(lc hive.Lifecycle, backendPromise promise.Promise[BackendOperations]) promise.Promise[BackendOperationsUserMgmt] {
+ resolver, promise := promise.New[BackendOperationsUserMgmt]()
+ ctx, cancel := context.WithCancel(context.Background())
+ var wg sync.WaitGroup
+
+ lc.Append(hive.Hook{
+ OnStart: func(hive.HookContext) error {
+ wg.Add(1)
+ go func() {
+ backend, err := backendPromise.Await(ctx)
+ if err != nil {
+ resolver.Reject(err)
+ } else {
+ resolver.Resolve(backend)
+ }
+ wg.Done()
+ }()
+ return nil
+ },
+ OnStop: func(hive.HookContext) error {
+ cancel()
+ wg.Wait()
+ return nil
+ },
+ })
+
+ return promise
+ }),
+)
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/client.go b/vendor/github.com/cilium/cilium/pkg/kvstore/client.go
new file mode 100644
index 000000000..3fdc3521c
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/client.go
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package kvstore
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/cilium/cilium/pkg/option"
+)
+
+var (
+ // defaultClient is the default client initialized by initClient
+ defaultClient BackendOperations
+ // defaultClientSet is a channel that is closed whenever the defaultClient
+ // is set.
+ defaultClientSet = make(chan struct{})
+)
+
+func initClient(ctx context.Context, module backendModule, opts *ExtraOptions) error {
+ scopedLog := log.WithField(fieldKVStoreModule, module.getName())
+ c, errChan := module.newClient(ctx, opts)
+ if c == nil {
+ err := <-errChan
+ scopedLog.WithError(err).Fatal("Unable to create kvstore client")
+ }
+
+ defaultClient = c
+ select {
+ case <-defaultClientSet:
+ // avoid closing channel already closed.
+ default:
+ close(defaultClientSet)
+ }
+
+ go func() {
+ err, isErr := <-errChan
+ if isErr && err != nil {
+ scopedLog.WithError(err).Fatal("Unable to connect to kvstore")
+ }
+ if !option.Config.JoinCluster {
+ deleteLegacyPrefixes(ctx)
+ }
+ }()
+
+ return nil
+}
+
+// Client returns the global kvstore, blocking until it has been configured
+func Client() BackendOperations {
+ <-defaultClientSet
+ return defaultClient
+}
+
+// NewClient returns a new kvstore client based on the configuration
+func NewClient(ctx context.Context, selectedBackend string, opts map[string]string, options *ExtraOptions) (BackendOperations, chan error) {
+ // Channel used to report immediate errors, module.newClient will
+ // create and return a different channel, caller doesn't need to know
+ errChan := make(chan error, 1)
+ defer close(errChan)
+
+ module := getBackend(selectedBackend)
+ if module == nil {
+ errChan <- fmt.Errorf("unknown key-value store type %q. See cilium.link/err-kvstore for details", selectedBackend)
+ return nil, errChan
+ }
+
+ if err := module.setConfig(opts); err != nil {
+ errChan <- err
+ return nil, errChan
+ }
+
+ if err := module.setExtraConfig(options); err != nil {
+ errChan <- err
+ return nil, errChan
+ }
+
+ return module.newClient(ctx, options)
+}
+
+// Connected returns a channel which is closed when the following conditions
+// are being met at the same time:
+// * The kvstore client is configured
+// * Connectivity to the kvstore has been established
+// * The kvstore has quorum
+//
+// The channel will *not* be closed if the kvstore client is closed before
+// connectivity or quorum has been achieved. It will wait until a new kvstore
+// client is configured to again wait for connectivity and quorum.
+func Connected() <-chan struct{} {
+ c := make(chan struct{})
+ go func(c chan struct{}) {
+ for {
+ if err := <-Client().Connected(context.Background()); err == nil {
+ close(c)
+ return
+ }
+ time.Sleep(100 * time.Millisecond)
+ }
+ }(c)
+ return c
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/config.go b/vendor/github.com/cilium/cilium/pkg/kvstore/config.go
new file mode 100644
index 000000000..7e85d4f06
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/config.go
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package kvstore
+
+import (
+ "context"
+ "fmt"
+ "sync"
+
+ "github.com/cilium/cilium/pkg/logging/logfields"
+)
+
+var (
+ // selectedModule is the name of the selected backend module
+ selectedModule string
+)
+
+// setOpts validates the specified options against the selected backend and
+// then modifies the configuration
+func setOpts(opts map[string]string, supportedOpts backendOptions) error {
+ errors := 0
+
+ for key, val := range opts {
+ opt, ok := supportedOpts[key]
+ if !ok {
+ errors++
+ log.WithField(logfields.Key, key).Error("unknown kvstore configuration key")
+ continue
+ }
+
+ if opt.validate != nil {
+ if err := opt.validate(val); err != nil {
+ log.WithError(err).Errorf("invalid value for key %s", key)
+ errors++
+ }
+ }
+
+ }
+
+ // if errors have occurred, print the supported configuration keys to
+ // the log
+ if errors > 0 {
+ log.Error("Supported configuration keys:")
+ for key, val := range supportedOpts {
+ log.Errorf(" %-12s %s", key, val.description)
+ }
+
+ return fmt.Errorf("invalid kvstore configuration, see log for details")
+ }
+
+ // modify the configuration atomically after verification
+ for key, val := range opts {
+ supportedOpts[key].value = val
+ }
+
+ return nil
+}
+
+func getOpts(opts backendOptions) map[string]string {
+ result := map[string]string{}
+
+ for key, opt := range opts {
+ result[key] = opt.value
+ }
+
+ return result
+}
+
+var (
+ setupOnce sync.Once
+)
+
+func setup(ctx context.Context, selectedBackend string, opts map[string]string, goOpts *ExtraOptions) error {
+ module := getBackend(selectedBackend)
+ if module == nil {
+ return fmt.Errorf("unknown key-value store type %q. See cilium.link/err-kvstore for details", selectedBackend)
+ }
+
+ if err := module.setConfig(opts); err != nil {
+ return err
+ }
+
+ if err := module.setExtraConfig(goOpts); err != nil {
+ return err
+ }
+
+ selectedModule = module.getName()
+
+ return initClient(ctx, module, goOpts)
+}
+
+// Setup sets up the key-value store specified in kvStore and configures it
+// with the options provided in opts
+func Setup(ctx context.Context, selectedBackend string, opts map[string]string, goOpts *ExtraOptions) error {
+ var err error
+
+ setupOnce.Do(func() {
+ err = setup(ctx, selectedBackend, opts, goOpts)
+ })
+
+ return err
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/consul.go b/vendor/github.com/cilium/cilium/pkg/kvstore/consul.go
new file mode 100644
index 000000000..067fe02c0
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/consul.go
@@ -0,0 +1,790 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package kvstore
+
+import (
+ "bytes"
+ "context"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "os"
+ "time"
+
+ consulAPI "github.com/hashicorp/consul/api"
+ "github.com/sirupsen/logrus"
+ "gopkg.in/yaml.v3"
+
+ "github.com/cilium/cilium/pkg/backoff"
+ "github.com/cilium/cilium/pkg/controller"
+ "github.com/cilium/cilium/pkg/inctimer"
+ "github.com/cilium/cilium/pkg/lock"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+ "github.com/cilium/cilium/pkg/option"
+ "github.com/cilium/cilium/pkg/spanstat"
+)
+
+const (
+ consulName = "consul"
+
+ // ConsulAddrOption is the string representing the key mapping to the value of the
+ // address for Consul.
+ ConsulAddrOption = "consul.address"
+ ConsulOptionConfig = "consul.tlsconfig"
+
+ // maxLockRetries is the number of retries attempted when acquiring a lock
+ maxLockRetries = 10
+)
+
+type consulModule struct {
+ opts backendOptions
+ config *consulAPI.Config
+}
+
+var (
+ // consulDummyAddress can be overwritten from test invokers using ldflags
+ consulDummyAddress = "https://127.0.0.1:8501"
+ // consulDummyConfigFile can be overwritten from test invokers using ldflags
+ consulDummyConfigFile = "/tmp/cilium-consul-certs/cilium-consul.yaml"
+
+ module = newConsulModule()
+
+ // ErrNotImplemented is the error which is returned when a functionality is not implemented.
+ ErrNotImplemented = errors.New("not implemented")
+
+ consulLeaseKeepaliveControllerGroup = controller.NewGroup("consul-lease-keepalive")
+)
+
+func init() {
+ // register consul module for use
+ registerBackend(consulName, module)
+}
+
+func newConsulModule() backendModule {
+ return &consulModule{
+ opts: backendOptions{
+ ConsulAddrOption: &backendOption{
+ description: "Addresses of consul cluster",
+ },
+ ConsulOptionConfig: &backendOption{
+ description: "Path to consul tls configuration file",
+ },
+ },
+ }
+}
+
+func ConsulDummyAddress() string {
+ return consulDummyAddress
+}
+
+func ConsulDummyConfigFile() string {
+ return consulDummyConfigFile
+}
+
+func (c *consulModule) createInstance() backendModule {
+ return newConsulModule()
+}
+
+func (c *consulModule) getName() string {
+ return consulName
+}
+
+func (c *consulModule) setConfigDummy() {
+ c.config = consulAPI.DefaultConfig()
+ c.config.Address = consulDummyAddress
+ yc := consulAPI.TLSConfig{}
+ b, err := os.ReadFile(consulDummyConfigFile)
+ if err != nil {
+ log.WithError(err).Warnf("unable to read consul tls configuration file %s", consulDummyConfigFile)
+ }
+
+ err = yaml.Unmarshal(b, &yc)
+ if err != nil {
+ log.WithError(err).Warnf("invalid consul tls configuration in %s", consulDummyConfigFile)
+ }
+
+ c.config.TLSConfig = yc
+}
+
+func (c *consulModule) setConfig(opts map[string]string) error {
+ return setOpts(opts, c.opts)
+}
+
+func (c *consulModule) setExtraConfig(opts *ExtraOptions) error {
+ return nil
+}
+
+func (c *consulModule) getConfig() map[string]string {
+ return getOpts(c.opts)
+}
+
+func (c *consulModule) newClient(ctx context.Context, opts *ExtraOptions) (BackendOperations, chan error) {
+ log.WithFields(logrus.Fields{
+ logfields.URL: "https://cilium.herokuapp.com/",
+ }).Warning("Support for Consul as a kvstore backend has been deprecated due to lack of maintainers. If you are interested in helping to maintain Consul support in Cilium, please reach out on GitHub or the official Cilium slack")
+
+ errChan := make(chan error, 1)
+ backend, err := c.connectConsulClient(ctx, opts)
+ if err != nil {
+ errChan <- err
+ }
+ close(errChan)
+ return backend, errChan
+}
+
+func (c *consulModule) connectConsulClient(ctx context.Context, opts *ExtraOptions) (BackendOperations, error) {
+ if c.config == nil {
+ consulAddr, consulAddrSet := c.opts[ConsulAddrOption]
+ configPathOpt, configPathOptSet := c.opts[ConsulOptionConfig]
+ if !consulAddrSet {
+ return nil, fmt.Errorf("invalid consul configuration, please specify %s option", ConsulAddrOption)
+ }
+
+ if consulAddr.value == "" {
+ return nil, fmt.Errorf("invalid consul configuration, please specify %s option", ConsulAddrOption)
+ }
+
+ addr := consulAddr.value
+ c.config = consulAPI.DefaultConfig()
+ if configPathOptSet && configPathOpt.value != "" {
+ b, err := os.ReadFile(configPathOpt.value)
+ if err != nil {
+ return nil, fmt.Errorf("unable to read consul tls configuration file %s: %s", configPathOpt.value, err)
+ }
+ yc := consulAPI.TLSConfig{}
+ err = yaml.Unmarshal(b, &yc)
+ if err != nil {
+ return nil, fmt.Errorf("invalid consul tls configuration in %s: %s", configPathOpt.value, err)
+ }
+ c.config.TLSConfig = yc
+ }
+
+ c.config.Address = addr
+
+ }
+ client, err := newConsulClient(ctx, c.config, opts)
+ if err != nil {
+ return nil, err
+ }
+
+ return client, nil
+}
+
+var (
+ maxRetries = 30
+)
+
+type consulClient struct {
+ *consulAPI.Client
+ lease string
+ controllers *controller.Manager
+ extraOptions *ExtraOptions
+ disconnectedMu lock.RWMutex
+ disconnected chan struct{}
+ statusCheckErrors chan error
+}
+
+func newConsulClient(ctx context.Context, config *consulAPI.Config, opts *ExtraOptions) (BackendOperations, error) {
+ var (
+ c *consulAPI.Client
+ err error
+ )
+ if config != nil {
+ c, err = consulAPI.NewClient(config)
+ } else {
+ c, err = consulAPI.NewClient(consulAPI.DefaultConfig())
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ boff := backoff.Exponential{Min: time.Duration(100) * time.Millisecond}
+
+ for i := 0; i < maxRetries; i++ {
+ var leader string
+ leader, err = c.Status().Leader()
+
+ if err == nil {
+ if leader != "" {
+ // happy path
+ break
+ } else {
+ err = errors.New("timeout while waiting for leader to be elected")
+ }
+ }
+ log.Info("Waiting for consul to elect a leader")
+ boff.Wait(ctx)
+ }
+
+ if err != nil {
+ log.WithError(err).Fatal("Unable to contact consul server")
+ }
+
+ entry := &consulAPI.SessionEntry{
+ TTL: fmt.Sprintf("%ds", int(option.Config.KVstoreLeaseTTL.Seconds())),
+ Behavior: consulAPI.SessionBehaviorDelete,
+ }
+
+ wo := &consulAPI.WriteOptions{}
+ lease, _, err := c.Session().Create(entry, wo.WithContext(ctx))
+ if err != nil {
+ return nil, fmt.Errorf("unable to create default lease: %s", err)
+ }
+
+ client := &consulClient{
+ Client: c,
+ lease: lease,
+ controllers: controller.NewManager(),
+ extraOptions: opts,
+ disconnected: make(chan struct{}),
+ statusCheckErrors: make(chan error, 128),
+ }
+
+ client.controllers.UpdateController(
+ fmt.Sprintf("consul-lease-keepalive-%p", c),
+ controller.ControllerParams{
+ Group: consulLeaseKeepaliveControllerGroup,
+ DoFunc: func(ctx context.Context) error {
+ wo := &consulAPI.WriteOptions{}
+ _, _, err := c.Session().Renew(lease, wo.WithContext(ctx))
+ if err != nil {
+ // consider disconnected!
+ client.disconnectedMu.Lock()
+ close(client.disconnected)
+ client.disconnected = make(chan struct{})
+ client.disconnectedMu.Unlock()
+ }
+ return err
+ },
+ RunInterval: option.Config.KVstoreKeepAliveInterval,
+ },
+ )
+
+ return client, nil
+}
+
+type ConsulLocker struct {
+ *consulAPI.Lock
+}
+
+func (cl *ConsulLocker) Unlock(ctx context.Context) error {
+ return cl.Lock.Unlock()
+}
+
+func (cl *ConsulLocker) Comparator() interface{} {
+ return nil
+}
+
+func (c *consulClient) LockPath(ctx context.Context, path string) (KVLocker, error) {
+ lockKey, err := c.LockOpts(&consulAPI.LockOptions{Key: getLockPath(path)})
+ if err != nil {
+ return nil, err
+ }
+
+ for retries := 0; retries < maxLockRetries; retries++ {
+ ch, err := lockKey.Lock(nil)
+ switch {
+ case err != nil:
+ return nil, err
+ case ch == nil:
+ Trace("Acquiring lock timed out, retrying", nil, logrus.Fields{fieldKey: path, logfields.Attempt: retries})
+ default:
+ return &ConsulLocker{Lock: lockKey}, err
+ }
+
+ select {
+ case <-ctx.Done():
+ return nil, fmt.Errorf("lock cancelled via context: %s", ctx.Err())
+ default:
+ }
+ }
+
+ return nil, fmt.Errorf("maximum retries (%d) reached", maxLockRetries)
+}
+
+// Watch starts watching for changes in a prefix
+func (c *consulClient) Watch(ctx context.Context, w *Watcher) {
+ // Last known state of all KVPairs matching the prefix
+ localState := map[string]consulAPI.KVPair{}
+ nextIndex := uint64(0)
+
+ q := &consulAPI.QueryOptions{
+ WaitTime: time.Second,
+ }
+
+ qo := q.WithContext(ctx)
+
+ sleepTimer, sleepTimerDone := inctimer.New()
+ defer sleepTimerDone()
+
+ for {
+ // Initialize sleep time to a millisecond as we don't
+ // want to sleep in between successful watch cycles
+ sleepTime := 1 * time.Millisecond
+
+ qo.WaitIndex = nextIndex
+ pairs, q, err := c.KV().List(w.Prefix, qo)
+ if err != nil {
+ sleepTime = 5 * time.Second
+ Trace("List of Watch failed", err, logrus.Fields{fieldPrefix: w.Prefix, fieldWatcher: w.Name})
+ }
+
+ if q != nil {
+ nextIndex = q.LastIndex
+ }
+
+ // timeout while watching for changes, re-schedule
+ if qo.WaitIndex != 0 && (q == nil || q.LastIndex == qo.WaitIndex) {
+ goto wait
+ }
+
+ for _, newPair := range pairs {
+ oldPair, ok := localState[newPair.Key]
+
+ // Keys reported for the first time must be new
+ if !ok {
+ if newPair.CreateIndex != newPair.ModifyIndex {
+ log.Debugf("consul: Previously unknown key %s received with CreateIndex(%d) != ModifyIndex(%d)",
+ newPair.Key, newPair.CreateIndex, newPair.ModifyIndex)
+ }
+
+ queueStart := spanstat.Start()
+ w.Events <- KeyValueEvent{
+ Typ: EventTypeCreate,
+ Key: newPair.Key,
+ Value: newPair.Value,
+ }
+ trackEventQueued(newPair.Key, EventTypeCreate, queueStart.End(true).Total())
+ } else if oldPair.ModifyIndex != newPair.ModifyIndex {
+ queueStart := spanstat.Start()
+ w.Events <- KeyValueEvent{
+ Typ: EventTypeModify,
+ Key: newPair.Key,
+ Value: newPair.Value,
+ }
+ trackEventQueued(newPair.Key, EventTypeModify, queueStart.End(true).Total())
+ }
+
+ // Everything left on localState will be assumed to
+ // have been deleted, therefore remove all keys in
+ // localState that still exist in the kvstore
+ delete(localState, newPair.Key)
+ }
+
+ for k, deletedPair := range localState {
+ queueStart := spanstat.Start()
+ w.Events <- KeyValueEvent{
+ Typ: EventTypeDelete,
+ Key: deletedPair.Key,
+ Value: deletedPair.Value,
+ }
+ trackEventQueued(deletedPair.Key, EventTypeDelete, queueStart.End(true).Total())
+ delete(localState, k)
+ }
+
+ for _, newPair := range pairs {
+ localState[newPair.Key] = *newPair
+
+ }
+
+ // Initial list operation has been completed, signal this
+ if qo.WaitIndex == 0 {
+ w.Events <- KeyValueEvent{Typ: EventTypeListDone}
+ }
+
+ wait:
+ select {
+ case <-sleepTimer.After(sleepTime):
+ case <-w.stopWatch:
+ close(w.Events)
+ w.stopWait.Done()
+ return
+ }
+ }
+}
+
+func (c *consulClient) waitForInitLock(ctx context.Context) <-chan struct{} {
+ initLockSucceeded := make(chan struct{})
+
+ go func() {
+ for {
+ locker, err := c.LockPath(ctx, InitLockPath)
+ if err == nil {
+ locker.Unlock(context.Background())
+ close(initLockSucceeded)
+ log.Info("Distributed lock successful, consul has quorum")
+ return
+ }
+
+ time.Sleep(100 * time.Millisecond)
+ }
+ }()
+
+ return initLockSucceeded
+}
+
+// Connected closes the returned channel when the consul client is connected.
+func (c *consulClient) Connected(ctx context.Context) <-chan error {
+ ch := make(chan error)
+ go func() {
+ for {
+ qo := &consulAPI.QueryOptions{}
+ // TODO find out if there's a better way to do this for consul
+ _, _, err := c.Session().Info(c.lease, qo.WithContext(ctx))
+ if err == nil {
+ break
+ }
+ time.Sleep(100 * time.Millisecond)
+ }
+ <-c.waitForInitLock(ctx)
+ close(ch)
+ }()
+ return ch
+}
+
+// Disconnected closes the returned channel when consul detects the client
+// is disconnected from the server.
+func (c *consulClient) Disconnected() <-chan struct{} {
+ c.disconnectedMu.RLock()
+ ch := c.disconnected
+ c.disconnectedMu.RUnlock()
+ return ch
+}
+
+func (c *consulClient) Status() (string, error) {
+ leader, err := c.Client.Status().Leader()
+ return "Consul: " + leader, err
+}
+
+func (c *consulClient) DeletePrefix(ctx context.Context, path string) (err error) {
+ defer func() { Trace("DeletePrefix", err, logrus.Fields{fieldPrefix: path}) }()
+
+ duration := spanstat.Start()
+ wo := &consulAPI.WriteOptions{}
+ _, err = c.Client.KV().DeleteTree(path, wo.WithContext(ctx))
+ increaseMetric(path, metricDelete, "DeletePrefix", duration.EndError(err).Total(), err)
+ return err
+}
+
+// Set sets value of key
+func (c *consulClient) Set(ctx context.Context, key string, value []byte) (err error) {
+ defer func() { Trace("Set", err, logrus.Fields{fieldKey: key, fieldValue: string(value)}) }()
+
+ duration := spanstat.Start()
+ wo := &consulAPI.WriteOptions{}
+ _, err = c.KV().Put(&consulAPI.KVPair{Key: key, Value: value}, wo.WithContext(ctx))
+ increaseMetric(key, metricSet, "Set", duration.EndError(err).Total(), err)
+ return err
+}
+
+// DeleteIfLocked deletes a key if the client is still holding the given lock.
+func (c *consulClient) DeleteIfLocked(ctx context.Context, key string, lock KVLocker) (err error) {
+ defer func() { Trace("DeleteIfLocked", err, logrus.Fields{fieldKey: key}) }()
+ return c.delete(ctx, key)
+}
+
+// Delete deletes a key
+func (c *consulClient) Delete(ctx context.Context, key string) (err error) {
+ defer func() { Trace("Delete", err, logrus.Fields{fieldKey: key}) }()
+ return c.delete(ctx, key)
+}
+
+func (c *consulClient) delete(ctx context.Context, key string) error {
+ duration := spanstat.Start()
+ wo := &consulAPI.WriteOptions{}
+ _, err := c.KV().Delete(key, wo.WithContext(ctx))
+ increaseMetric(key, metricDelete, "Delete", duration.EndError(err).Total(), err)
+ return err
+}
+
+// GetIfLocked returns value of key if the client is still holding the given lock.
+func (c *consulClient) GetIfLocked(ctx context.Context, key string, lock KVLocker) (bv []byte, err error) {
+ defer func() { Trace("GetIfLocked", err, logrus.Fields{fieldKey: key, fieldValue: string(bv)}) }()
+ return c.Get(ctx, key)
+}
+
+// Get returns value of key
+func (c *consulClient) Get(ctx context.Context, key string) (bv []byte, err error) {
+ defer func() { Trace("Get", err, logrus.Fields{fieldKey: key, fieldValue: string(bv)}) }()
+
+ duration := spanstat.Start()
+ qo := &consulAPI.QueryOptions{}
+ pair, _, err := c.KV().Get(key, qo.WithContext(ctx))
+ increaseMetric(key, metricRead, "Get", duration.EndError(err).Total(), err)
+ if err != nil {
+ return nil, err
+ }
+ if pair == nil {
+ return nil, nil
+ }
+ return pair.Value, nil
+}
+
+// GetPrefixIfLocked returns the first key which matches the prefix and its value if the client is still holding the given lock.
+func (c *consulClient) GetPrefixIfLocked(ctx context.Context, prefix string, lock KVLocker) (k string, bv []byte, err error) {
+ defer func() {
+ Trace("GetPrefixIfLocked", err, logrus.Fields{fieldPrefix: prefix, fieldKey: k, fieldValue: string(bv)})
+ }()
+ return c.getPrefix(ctx, prefix)
+}
+
+// GetPrefix returns the first key which matches the prefix and its value
+func (c *consulClient) GetPrefix(ctx context.Context, prefix string) (k string, bv []byte, err error) {
+ defer func() {
+ Trace("GetPrefix", err, logrus.Fields{fieldPrefix: prefix, fieldKey: k, fieldValue: string(bv)})
+ }()
+ return c.getPrefix(ctx, prefix)
+}
+
+func (c *consulClient) getPrefix(ctx context.Context, prefix string) (k string, bv []byte, err error) {
+ duration := spanstat.Start()
+ opts := &consulAPI.QueryOptions{}
+ pairs, _, err := c.KV().List(prefix, opts.WithContext(ctx))
+ increaseMetric(prefix, metricRead, "GetPrefix", duration.EndError(err).Total(), err)
+ if err != nil {
+ return "", nil, err
+ }
+
+ if len(pairs) == 0 {
+ return "", nil, nil
+ }
+
+ return pairs[0].Key, pairs[0].Value, nil
+}
+
+// UpdateIfLocked updates a key if the client is still holding the given lock.
+func (c *consulClient) UpdateIfLocked(ctx context.Context, key string, value []byte, lease bool, lock KVLocker) error {
+ return c.Update(ctx, key, value, lease)
+}
+
+// Update creates or updates a key with the value
+func (c *consulClient) Update(ctx context.Context, key string, value []byte, lease bool) (err error) {
+ defer func() {
+ Trace("Update", err, logrus.Fields{fieldKey: key, fieldValue: string(value), fieldAttachLease: lease})
+ }()
+
+ k := &consulAPI.KVPair{Key: key, Value: value}
+
+ if lease {
+ k.Session = c.lease
+ }
+
+ opts := &consulAPI.WriteOptions{}
+
+ duration := spanstat.Start()
+ _, err = c.KV().Put(k, opts.WithContext(ctx))
+ increaseMetric(key, metricSet, "Update", duration.EndError(err).Total(), err)
+ return err
+}
+
+// UpdateIfDifferentIfLocked updates a key if the value is different and if the client is still holding the given lock.
+func (c *consulClient) UpdateIfDifferentIfLocked(ctx context.Context, key string, value []byte, lease bool, lock KVLocker) (recreated bool, err error) {
+ defer func() {
+ Trace("UpdateIfDifferentIfLocked", err, logrus.Fields{fieldKey: key, fieldValue: value, fieldAttachLease: lease, "recreated": recreated})
+ }()
+
+ return c.updateIfDifferent(ctx, key, value, lease)
+}
+
+// UpdateIfDifferent updates a key if the value is different
+func (c *consulClient) UpdateIfDifferent(ctx context.Context, key string, value []byte, lease bool) (recreated bool, err error) {
+ defer func() {
+ Trace("UpdateIfDifferent", err, logrus.Fields{fieldKey: key, fieldValue: value, fieldAttachLease: lease, "recreated": recreated})
+ }()
+
+ return c.updateIfDifferent(ctx, key, value, lease)
+}
+
+func (c *consulClient) updateIfDifferent(ctx context.Context, key string, value []byte, lease bool) (bool, error) {
+ duration := spanstat.Start()
+ qo := &consulAPI.QueryOptions{}
+ getR, _, err := c.KV().Get(key, qo.WithContext(ctx))
+ increaseMetric(key, metricRead, "Get", duration.EndError(err).Total(), err)
+ // On error, attempt update blindly
+ if err != nil || getR == nil {
+ return true, c.Update(ctx, key, value, lease)
+ }
+
+ if lease && getR.Session != c.lease {
+ return true, c.Update(ctx, key, value, lease)
+ }
+
+	// Lease matches (or none requested); update only when the stored value differs.
+ if !bytes.Equal(getR.Value, value) {
+ return true, c.Update(ctx, key, value, lease)
+ }
+
+ return false, nil
+}
+
+// CreateOnlyIfLocked atomically creates a key if the client is still holding the given lock or fails if it already exists
+func (c *consulClient) CreateOnlyIfLocked(ctx context.Context, key string, value []byte, lease bool, lock KVLocker) (success bool, err error) {
+ defer func() {
+ Trace("CreateOnlyIfLocked", err, logrus.Fields{fieldKey: key, fieldValue: value, fieldAttachLease: lease, "success": success})
+ }()
+ return c.createOnly(ctx, key, value, lease)
+}
+
+// CreateOnly creates a key with the value and will fail if the key already exists
+func (c *consulClient) CreateOnly(ctx context.Context, key string, value []byte, lease bool) (success bool, err error) {
+ defer func() {
+ Trace("CreateOnly", err, logrus.Fields{fieldKey: key, fieldValue: value, fieldAttachLease: lease, "success": success})
+ }()
+
+ return c.createOnly(ctx, key, value, lease)
+}
+
+func (c *consulClient) createOnly(ctx context.Context, key string, value []byte, lease bool) (bool, error) {
+ k := &consulAPI.KVPair{
+ Key: key,
+ Value: value,
+ CreateIndex: 0,
+ }
+
+ if lease {
+ k.Session = c.lease
+ }
+ opts := &consulAPI.WriteOptions{}
+
+ duration := spanstat.Start()
+ success, _, err := c.KV().CAS(k, opts.WithContext(ctx))
+ increaseMetric(key, metricSet, "CreateOnly", duration.EndError(err).Total(), err)
+ if err != nil {
+ return false, fmt.Errorf("unable to compare-and-swap: %s", err)
+ }
+ return success, nil
+}
+
+// createIfExists creates a key with the value only if key condKey exists
+func (c *consulClient) createIfExists(ctx context.Context, condKey, key string, value []byte, lease bool) error {
+ // Consul does not support transactions which would allow to check for
+ // the presence of a conditional key if the key is not the key being
+ // manipulated
+ //
+ // Lock the conditional key to serialize all CreateIfExists() calls
+
+ l, err := LockPath(ctx, c, condKey)
+ if err != nil {
+ return fmt.Errorf("unable to lock condKey for CreateIfExists: %s", err)
+ }
+
+ defer l.Unlock(context.Background())
+
+ // Create the key if it does not exist
+ if _, err := c.CreateOnly(ctx, key, value, lease); err != nil {
+ return err
+ }
+
+ // Consul does not support transactions which would allow to check for
+ // the presence of another key
+ masterKey, err := c.Get(ctx, condKey)
+ if err != nil || masterKey == nil {
+ c.Delete(ctx, key)
+ return fmt.Errorf("conditional key not present")
+ }
+
+ return nil
+}
+
+// CreateIfExists creates a key with the value only if key condKey exists
+func (c *consulClient) CreateIfExists(ctx context.Context, condKey, key string, value []byte, lease bool) (err error) {
+ defer func() {
+ Trace("CreateIfExists", err, logrus.Fields{fieldKey: key, fieldValue: string(value), fieldCondition: condKey, fieldAttachLease: lease})
+ }()
+
+ duration := spanstat.Start()
+ err = c.createIfExists(ctx, condKey, key, value, lease)
+ increaseMetric(key, metricSet, "CreateIfExists", duration.EndError(err).Total(), err)
+ return err
+}
+
+// ListPrefixIfLocked returns a list of keys matching the prefix only if the client is still holding the given lock.
+func (c *consulClient) ListPrefixIfLocked(ctx context.Context, prefix string, lock KVLocker) (v KeyValuePairs, err error) {
+ defer func() { Trace("ListPrefixIfLocked", err, logrus.Fields{fieldPrefix: prefix, fieldNumEntries: len(v)}) }()
+ return c.listPrefix(ctx, prefix)
+}
+
+// ListPrefix returns a map of matching keys
+func (c *consulClient) ListPrefix(ctx context.Context, prefix string) (v KeyValuePairs, err error) {
+ defer func() { Trace("ListPrefix", err, logrus.Fields{fieldPrefix: prefix, fieldNumEntries: len(v)}) }()
+ return c.listPrefix(ctx, prefix)
+}
+
+func (c *consulClient) listPrefix(ctx context.Context, prefix string) (KeyValuePairs, error) {
+ duration := spanstat.Start()
+ qo := &consulAPI.QueryOptions{}
+ pairs, _, err := c.KV().List(prefix, qo.WithContext(ctx))
+ increaseMetric(prefix, metricRead, "ListPrefix", duration.EndError(err).Total(), err)
+ if err != nil {
+ return nil, err
+ }
+
+ p := KeyValuePairs(make(map[string]Value, len(pairs)))
+ for i := 0; i < len(pairs); i++ {
+ p[pairs[i].Key] = Value{
+ Data: pairs[i].Value,
+ ModRevision: pairs[i].ModifyIndex,
+ SessionID: pairs[i].Session,
+ }
+ }
+
+ return p, nil
+}
+
+// Close closes the consul session
+func (c *consulClient) Close(ctx context.Context) {
+ close(c.statusCheckErrors)
+ if c.controllers != nil {
+ c.controllers.RemoveAll()
+ }
+ if c.lease != "" {
+ c.Session().Destroy(c.lease, nil)
+ }
+}
+
+// GetCapabilities returns the capabilities of the backend
+func (c *consulClient) GetCapabilities() Capabilities {
+ return Capabilities(0)
+}
+
+// Encode encodes a binary slice into a character set that the backend supports
+func (c *consulClient) Encode(in []byte) (out string) {
+ defer func() { Trace("Encode", nil, logrus.Fields{"in": in, "out": out}) }()
+ return base64.URLEncoding.EncodeToString([]byte(in))
+}
+
+// Decode decodes a key previously encoded back into the original binary slice
+func (c *consulClient) Decode(in string) (out []byte, err error) {
+ defer func() { Trace("Decode", err, logrus.Fields{"in": in, "out": out}) }()
+ return base64.URLEncoding.DecodeString(in)
+}
+
+// ListAndWatch implements the BackendOperations.ListAndWatch using consul
+func (c *consulClient) ListAndWatch(ctx context.Context, name, prefix string, chanSize int) *Watcher {
+ w := newWatcher(name, prefix, chanSize)
+
+ log.WithField(fieldWatcher, w).Debug("Starting watcher...")
+
+ go c.Watch(ctx, w)
+
+ return w
+}
+
+// StatusCheckErrors returns a channel which receives status check errors
+func (c *consulClient) StatusCheckErrors() <-chan error {
+ return c.statusCheckErrors
+}
+
+// RegisterLeaseExpiredObserver is not implemented for the consul backend
+func (c *consulClient) RegisterLeaseExpiredObserver(prefix string, fn func(key string)) {}
+
+// UserEnforcePresence is not implemented for the consul backend
+func (c *consulClient) UserEnforcePresence(ctx context.Context, name string, roles []string) error {
+ return ErrNotImplemented
+}
+
+// UserEnforceAbsence is not implemented for the consul backend
+func (c *consulClient) UserEnforceAbsence(ctx context.Context, name string) error {
+ return ErrNotImplemented
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/doc.go b/vendor/github.com/cilium/cilium/pkg/kvstore/doc.go
new file mode 100644
index 000000000..b69800a22
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/doc.go
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Package kvstore abstracts KVstore access and provides a high level API to
+// atomically manage cluster wide resources
+package kvstore
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/dummy.go b/vendor/github.com/cilium/cilium/pkg/kvstore/dummy.go
new file mode 100644
index 000000000..61a1a5173
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/dummy.go
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package kvstore
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/cilium/cilium/pkg/inctimer"
+)
+
+// SetupDummy sets up kvstore for tests. A lock mechanism is used to prevent
+// the creation of two clients at the same time, to avoid interferences in case
+// different tests are run in parallel. A cleanup function is automatically
+// registered to delete all keys and close the client when the test terminates.
+func SetupDummy(tb testing.TB, dummyBackend string) {
+ SetupDummyWithConfigOpts(tb, dummyBackend, nil)
+}
+
+// SetupDummyWithConfigOpts sets up the dummy kvstore for tests but also
+// configures the module with the provided opts. A lock mechanism is used to
+// prevent the creation of two clients at the same time, to avoid interferences
+// in case different tests are run in parallel. A cleanup function is
+// automatically registered to delete all keys and close the client when the
+// test terminates.
+func SetupDummyWithConfigOpts(tb testing.TB, dummyBackend string, opts map[string]string) {
+ module := getBackend(dummyBackend)
+ if module == nil {
+ tb.Fatalf("Unknown dummy kvstore backend %s", dummyBackend)
+ }
+
+ module.setConfigDummy()
+
+ if opts != nil {
+ err := module.setConfig(opts)
+ if err != nil {
+ tb.Fatalf("Unable to set config options for kvstore backend module: %v", err)
+ }
+ }
+
+ if err := initClient(context.Background(), module, nil); err != nil {
+ tb.Fatalf("Unable to initialize kvstore client: %v", err)
+ }
+
+ tb.Cleanup(func() {
+ if err := Client().DeletePrefix(context.Background(), ""); err != nil {
+ tb.Fatalf("Unable to delete all kvstore keys: %v", err)
+ }
+
+ Client().Close(context.Background())
+ })
+
+ ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
+ defer cancel()
+
+ timer, done := inctimer.New()
+ defer done()
+
+ // Multiple tests might be running in parallel by go test if they are part of
+ // different packages. Let's implement a locking mechanism to ensure that only
+ // one at a time can access the kvstore, to prevent that they interact with
+ // each other. Locking is implemented through CreateOnly (rather than using
+ // the locking abstraction), so that we can release it in the same atomic
+ // transaction that also removes all the other keys.
+ for {
+ succeeded, err := Client().CreateOnly(ctx, ".lock", []byte(""), true)
+ if err != nil {
+ tb.Fatalf("Unable to acquire the kvstore lock: %v", err)
+ }
+
+ if succeeded {
+ return
+ }
+
+ select {
+ case <-timer.After(100 * time.Millisecond):
+ case <-ctx.Done():
+ tb.Fatal("Timed out waiting to acquire the kvstore lock")
+ }
+ }
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/etcd.go b/vendor/github.com/cilium/cilium/pkg/kvstore/etcd.go
new file mode 100644
index 000000000..8f9fb4e2a
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/etcd.go
@@ -0,0 +1,2126 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package kvstore
+
+import (
+ "bytes"
+ "context"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "net/url"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/blang/semver/v4"
+ "github.com/sirupsen/logrus"
+ "go.etcd.io/etcd/api/v3/mvccpb"
+ v3rpcErrors "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+ "go.etcd.io/etcd/client/pkg/v3/logutil"
+ "go.etcd.io/etcd/client/pkg/v3/tlsutil"
+ client "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/client/v3/concurrency"
+ clientyaml "go.etcd.io/etcd/client/v3/yaml"
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+ "golang.org/x/time/rate"
+ "sigs.k8s.io/yaml"
+
+ "github.com/cilium/cilium/pkg/backoff"
+ "github.com/cilium/cilium/pkg/contexthelpers"
+ "github.com/cilium/cilium/pkg/controller"
+ "github.com/cilium/cilium/pkg/defaults"
+ "github.com/cilium/cilium/pkg/inctimer"
+ "github.com/cilium/cilium/pkg/lock"
+ "github.com/cilium/cilium/pkg/option"
+ "github.com/cilium/cilium/pkg/rand"
+ ciliumrate "github.com/cilium/cilium/pkg/rate"
+ ciliumratemetrics "github.com/cilium/cilium/pkg/rate/metrics"
+ "github.com/cilium/cilium/pkg/spanstat"
+ "github.com/cilium/cilium/pkg/versioncheck"
+)
+
+const (
+ // EtcdBackendName is the backend name for etcd
+ EtcdBackendName = "etcd"
+
+ EtcdAddrOption = "etcd.address"
+ isEtcdOperatorOption = "etcd.operator"
+ EtcdOptionConfig = "etcd.config"
+ EtcdOptionKeepAliveHeartbeat = "etcd.keepaliveHeartbeat"
+ EtcdOptionKeepAliveTimeout = "etcd.keepaliveTimeout"
+
+ // EtcdRateLimitOption specifies maximum kv operations per second
+ EtcdRateLimitOption = "etcd.qps"
+
+ // EtcdMaxInflightOption specifies maximum inflight concurrent kv store operations
+ EtcdMaxInflightOption = "etcd.maxInflight"
+
+ // EtcdListLimitOption limits the number of results retrieved in one batch
+ // by ListAndWatch operations. A 0 value equals to no limit.
+ EtcdListLimitOption = "etcd.limit"
+
+ minRequiredVersionStr = ">=3.1.0"
+
+ etcdLockSessionRenewNamePrefix = "kvstore-etcd-lock-session-renew"
+
+ // etcdMaxKeysPerLease is the maximum number of keys that can be attached to a lease
+ etcdMaxKeysPerLease = 1000
+)
+
+var (
+ // ErrLockLeaseExpired is an error whenever the lease of the lock does not
+ // exist or it was expired.
+ ErrLockLeaseExpired = errors.New("transaction did not succeed: lock lease expired")
+
+ randGen = rand.NewSafeRand(time.Now().UnixNano())
+
+ etcdLockSessionRenewControllerGroup = controller.NewGroup(etcdLockSessionRenewNamePrefix)
+)
+
+type etcdModule struct {
+ opts backendOptions
+ config *client.Config
+}
+
+// versionCheckTimeout is the time we wait trying to verify the version
+// of an etcd endpoint. The timeout can be encountered on network
+// connectivity problems.
+const versionCheckTimeout = 30 * time.Second
+
+var (
+ // statusCheckTimeout is the timeout when performing status checks with
+ // all etcd endpoints
+ statusCheckTimeout = 10 * time.Second
+
+ // initialConnectionTimeout is the timeout for the initial connection to
+ // the etcd server
+ initialConnectionTimeout = 15 * time.Minute
+
+ minRequiredVersion = versioncheck.MustCompile(minRequiredVersionStr)
+
+ // etcdDummyAddress can be overwritten from test invokers using ldflags
+ etcdDummyAddress = "http://127.0.0.1:4002"
+
+ etcdInstance = newEtcdModule()
+
+ // etcd3ClientLogger is the logger used for the underlying etcd clients. We
+ // explicitly initialize a logger and propagate it to prevent each client from
+ // automatically creating a new one, which comes with a significant memory cost.
+ etcd3ClientLogger *zap.Logger
+)
+
+func EtcdDummyAddress() string {
+ return etcdDummyAddress
+}
+
+func newEtcdModule() backendModule {
+ return &etcdModule{
+ opts: backendOptions{
+ isEtcdOperatorOption: &backendOption{
+ description: "if the configuration is setting up an etcd-operator",
+ },
+ EtcdAddrOption: &backendOption{
+ description: "Addresses of etcd cluster",
+ },
+ EtcdOptionConfig: &backendOption{
+ description: "Path to etcd configuration file",
+ },
+ EtcdOptionKeepAliveTimeout: &backendOption{
+ description: "Timeout after which an unanswered heartbeat triggers the connection to be closed",
+ validate: func(v string) error {
+ _, err := time.ParseDuration(v)
+ return err
+ },
+ },
+ EtcdOptionKeepAliveHeartbeat: &backendOption{
+ description: "Heartbeat interval to keep gRPC connection alive",
+ validate: func(v string) error {
+ _, err := time.ParseDuration(v)
+ return err
+ },
+ },
+ EtcdRateLimitOption: &backendOption{
+ description: "Rate limit in kv store operations per second",
+ validate: func(v string) error {
+ _, err := strconv.Atoi(v)
+ return err
+ },
+ },
+ EtcdMaxInflightOption: &backendOption{
+ description: "Maximum inflight concurrent kv store operations; defaults to etcd.qps if unset",
+ validate: func(v string) error {
+ _, err := strconv.Atoi(v)
+ return err
+ },
+ },
+ EtcdListLimitOption: &backendOption{
+ description: "Max number of results retrieved in one batch by ListAndWatch operations (0 = no limit)",
+ validate: func(v string) error {
+ _, err := strconv.Atoi(v)
+ return err
+ },
+ },
+ },
+ }
+}
+
+func (e *etcdModule) createInstance() backendModule {
+ return newEtcdModule()
+}
+
+func (e *etcdModule) getName() string {
+ return EtcdBackendName
+}
+
+func (e *etcdModule) setConfigDummy() {
+ e.config = &client.Config{}
+ e.config.Endpoints = []string{etcdDummyAddress}
+}
+
+func (e *etcdModule) setConfig(opts map[string]string) error {
+ return setOpts(opts, e.opts)
+}
+
+func (e *etcdModule) setExtraConfig(opts *ExtraOptions) error {
+ if opts != nil && len(opts.DialOption) != 0 {
+ e.config = &client.Config{}
+ e.config.DialOptions = append(e.config.DialOptions, opts.DialOption...)
+ }
+ return nil
+}
+
+func (e *etcdModule) getConfig() map[string]string {
+ return getOpts(e.opts)
+}
+
+func shuffleEndpoints(endpoints []string) {
+ randGen.Shuffle(len(endpoints), func(i, j int) {
+ endpoints[i], endpoints[j] = endpoints[j], endpoints[i]
+ })
+}
+
+type clientOptions struct {
+ KeepAliveHeartbeat time.Duration
+ KeepAliveTimeout time.Duration
+ RateLimit int
+ MaxInflight int
+ ListBatchSize int
+}
+
+func (e *etcdModule) newClient(ctx context.Context, opts *ExtraOptions) (BackendOperations, chan error) {
+ errChan := make(chan error, 10)
+
+ clientOptions := clientOptions{
+ KeepAliveHeartbeat: 15 * time.Second,
+ KeepAliveTimeout: 25 * time.Second,
+ RateLimit: defaults.KVstoreQPS,
+ ListBatchSize: 256,
+ }
+
+ if o, ok := e.opts[EtcdRateLimitOption]; ok && o.value != "" {
+ clientOptions.RateLimit, _ = strconv.Atoi(o.value)
+ }
+
+ if o, ok := e.opts[EtcdMaxInflightOption]; ok && o.value != "" {
+ clientOptions.MaxInflight, _ = strconv.Atoi(o.value)
+ }
+
+ if clientOptions.MaxInflight == 0 {
+ clientOptions.MaxInflight = clientOptions.RateLimit
+ }
+
+ if o, ok := e.opts[EtcdListLimitOption]; ok && o.value != "" {
+ clientOptions.ListBatchSize, _ = strconv.Atoi(o.value)
+ }
+
+ if o, ok := e.opts[EtcdOptionKeepAliveTimeout]; ok && o.value != "" {
+ clientOptions.KeepAliveTimeout, _ = time.ParseDuration(o.value)
+ }
+
+ if o, ok := e.opts[EtcdOptionKeepAliveHeartbeat]; ok && o.value != "" {
+ clientOptions.KeepAliveHeartbeat, _ = time.ParseDuration(o.value)
+ }
+
+ endpointsOpt, endpointsSet := e.opts[EtcdAddrOption]
+ configPathOpt, configSet := e.opts[EtcdOptionConfig]
+
+ var configPath string
+ if configSet {
+ configPath = configPathOpt.value
+ }
+ if e.config == nil {
+ if !endpointsSet && !configSet {
+ errChan <- fmt.Errorf("invalid etcd configuration, %s or %s must be specified", EtcdOptionConfig, EtcdAddrOption)
+ close(errChan)
+ return nil, errChan
+ }
+
+ if endpointsOpt.value == "" && configPath == "" {
+ errChan <- fmt.Errorf("invalid etcd configuration, %s or %s must be specified",
+ EtcdOptionConfig, EtcdAddrOption)
+ close(errChan)
+ return nil, errChan
+ }
+
+ e.config = &client.Config{}
+ }
+
+ if e.config.Endpoints == nil && endpointsSet {
+ e.config.Endpoints = []string{endpointsOpt.value}
+ }
+
+ log.WithFields(logrus.Fields{
+ "ConfigPath": configPath,
+ "KeepAliveHeartbeat": clientOptions.KeepAliveHeartbeat,
+ "KeepAliveTimeout": clientOptions.KeepAliveTimeout,
+ "RateLimit": clientOptions.RateLimit,
+ "MaxInflight": clientOptions.MaxInflight,
+ "ListLimit": clientOptions.ListBatchSize,
+ }).Info("Creating etcd client")
+
+ for {
+ // connectEtcdClient will close errChan when the connection attempt has
+ // been successful
+ backend, err := connectEtcdClient(ctx, e.config, configPath, errChan, clientOptions, opts)
+ switch {
+ case os.IsNotExist(err):
+ log.WithError(err).Info("Waiting for all etcd configuration files to be available")
+ time.Sleep(5 * time.Second)
+ case err != nil:
+ errChan <- err
+ close(errChan)
+ return backend, errChan
+ default:
+ return backend, errChan
+ }
+ }
+}
+
+func init() {
+ // register etcd module for use
+ registerBackend(EtcdBackendName, etcdInstance)
+
+ if duration := os.Getenv("CILIUM_ETCD_STATUS_CHECK_INTERVAL"); duration != "" {
+ timeout, err := time.ParseDuration(duration)
+ if err == nil {
+ statusCheckTimeout = timeout
+ }
+ }
+
+ // Initialize the etcd client logger.
+ l, err := logutil.CreateDefaultZapLogger(etcdClientDebugLevel())
+ if err != nil {
+ log.WithError(err).Warning("Failed to initialize etcd client logger")
+ l = zap.NewNop()
+ }
+ etcd3ClientLogger = l.Named("etcd-client")
+}
+
+// etcdClientDebugLevel translates ETCD_CLIENT_DEBUG into zap log level.
+// This is a copy of a private etcd client function:
+// https://github.com/etcd-io/etcd/blob/v3.5.9/client/v3/logger.go#L47-L59
+func etcdClientDebugLevel() zapcore.Level {
+ envLevel := os.Getenv("ETCD_CLIENT_DEBUG")
+ if envLevel == "" || envLevel == "true" {
+ return zapcore.InfoLevel
+ }
+ var l zapcore.Level
+ if err := l.Set(envLevel); err != nil {
+ log.Warning("Invalid value for environment variable 'ETCD_CLIENT_DEBUG'. Using default level: 'info'")
+ return zapcore.InfoLevel
+ }
+ return l
+}
+
+// Hint tries to improve the error message displayed to the user.
+func Hint(err error) error {
+ switch err {
+ case context.DeadlineExceeded:
+ return fmt.Errorf("etcd client timeout exceeded")
+ default:
+ return err
+ }
+}
+
+type etcdClient struct {
+ // firstSession is a channel that will be closed once the first session
+ // is set up in the etcd client. If an error occurred and the initial
+ // session cannot be established, the error is provided via the
+ // channel.
+ firstSession chan struct{}
+
+ // stopStatusChecker is closed when the status checker can be terminated
+ stopStatusChecker chan struct{}
+
+ client *client.Client
+ controllers *controller.Manager
+
+ // config and configPath are initialized once and never written to again, they can be accessed without locking
+ config *client.Config
+ configPath string
+
+ // statusCheckErrors receives all errors reported by statusChecker()
+ statusCheckErrors chan error
+
+ // protects all sessions and sessionErr from concurrent access
+ lock.RWMutex
+
+ sessionErr error
+ lockSession *concurrency.Session
+ lockSessionCancel context.CancelFunc
+
+ leaseManager *etcdLeaseManager
+
+ // statusLock protects latestStatusSnapshot and latestErrorStatus for
+ // read/write access
+ statusLock lock.RWMutex
+
+ // latestStatusSnapshot is a snapshot of the latest etcd cluster status
+ latestStatusSnapshot string
+
+ // latestErrorStatus is the latest error condition of the etcd connection
+ latestErrorStatus error
+
+ extraOptions *ExtraOptions
+
+ limiter *ciliumrate.APILimiter
+ listBatchSize int
+
+ lastHeartbeat time.Time
+
+ leaseExpiredObservers sync.Map
+}
+
+func (e *etcdClient) getLogger() *logrus.Entry {
+ endpoints, path := []string{""}, ""
+ if e != nil {
+ if e.config != nil {
+ endpoints = e.config.Endpoints
+ }
+ path = e.configPath
+ }
+
+ return log.WithFields(logrus.Fields{
+ "endpoints": endpoints,
+ "config": path,
+ })
+}
+
+type etcdMutex struct {
+ mutex *concurrency.Mutex
+}
+
+func (e *etcdMutex) Unlock(ctx context.Context) error {
+ return e.mutex.Unlock(ctx)
+}
+
+func (e *etcdMutex) Comparator() interface{} {
+ return e.mutex.IsOwner()
+}
+
+// StatusCheckErrors returns a channel which receives status check errors
+func (e *etcdClient) StatusCheckErrors() <-chan error {
+ return e.statusCheckErrors
+}
+
+// GetLockSessionLeaseID returns the current lease ID for the lock session.
+func (e *etcdClient) GetLockSessionLeaseID() client.LeaseID {
+ e.RWMutex.RLock()
+ l := e.lockSession.Lease()
+ e.RWMutex.RUnlock()
+ return l
+}
+
+// checkLockSession verifies if the lease is still valid from the return error of
+// an etcd API call. If the error explicitly states that a lease was not found
+// we mark the session as an orphan for this etcd client. If we would not mark
+// it as an Orphan() the session would be considered expired after the leaseTTL
+// By making it orphan we guarantee the session will be marked to be renewed.
+func (e *etcdClient) checkLockSession(err error, leaseID client.LeaseID) {
+ if errors.Is(err, v3rpcErrors.ErrLeaseNotFound) {
+ e.closeLockSession(leaseID)
+ }
+}
+
+// closeLockSession closes the current lock session.
+func (e *etcdClient) closeLockSession(leaseID client.LeaseID) {
+ e.RWMutex.RLock()
+ // only mark a session as orphan if the leaseID is the same as the
+ // session ID to avoid making any other sessions as orphan.
+ if e.lockSession.Lease() == leaseID {
+ e.lockSession.Orphan()
+ }
+ e.RWMutex.RUnlock()
+}
+
+func (e *etcdClient) waitForInitLock(ctx context.Context) <-chan error {
+ initLockSucceeded := make(chan error)
+
+ go func() {
+ for {
+ select {
+ case <-e.client.Ctx().Done():
+ initLockSucceeded <- fmt.Errorf("client context ended: %w", e.client.Ctx().Err())
+ close(initLockSucceeded)
+ return
+ case <-ctx.Done():
+ initLockSucceeded <- fmt.Errorf("caller context ended: %w", ctx.Err())
+ close(initLockSucceeded)
+ return
+ default:
+ }
+
+ if e.extraOptions != nil && e.extraOptions.NoLockQuorumCheck {
+ close(initLockSucceeded)
+ return
+ }
+
+ // Generate a random number so that we can acquire a lock even
+ // if other agents are killed while locking this path.
+ randNumber := strconv.FormatUint(randGen.Uint64(), 16)
+ locker, err := e.LockPath(ctx, InitLockPath+"/"+randNumber)
+ if err == nil {
+ locker.Unlock(context.Background())
+ close(initLockSucceeded)
+ e.getLogger().Debug("Distributed lock successful, etcd has quorum")
+ return
+ }
+
+ time.Sleep(100 * time.Millisecond)
+ }
+ }()
+
+ return initLockSucceeded
+}
+
+func (e *etcdClient) isConnectedAndHasQuorum(ctx context.Context) error {
+ ctxTimeout, cancel := context.WithTimeout(ctx, statusCheckTimeout)
+ defer cancel()
+
+ select {
+	// Wait for the initial connection to be established
+ case <-e.firstSession:
+ if err := e.sessionError(); err != nil {
+ return err
+ }
+ // Client is closing
+ case <-e.client.Ctx().Done():
+ return fmt.Errorf("client is closing")
+ // Timeout while waiting for initial connection, no success
+ case <-ctxTimeout.Done():
+ recordQuorumError("timeout")
+ return fmt.Errorf("timeout while waiting for initial connection")
+ }
+
+ e.RLock()
+ ch := e.lockSession.Done()
+ e.RUnlock()
+
+ initLockSucceeded := e.waitForInitLock(ctxTimeout)
+ select {
+ // Catch disconnect event, no success
+ case <-ch:
+ recordQuorumError("session timeout")
+ return fmt.Errorf("etcd session ended")
+ // wait for initial lock to succeed
+ case err := <-initLockSucceeded:
+ if err != nil {
+ recordQuorumError("lock timeout")
+ return fmt.Errorf("unable to acquire lock: %w", err)
+ }
+
+ return nil
+ }
+}
+
+// Connected closes the returned channel when the etcd client is connected. If
+// the context is cancelled or if the etcd client is closed, an error is
+// returned on the channel.
+func (e *etcdClient) Connected(ctx context.Context) <-chan error {
+ out := make(chan error)
+ go func() {
+ defer close(out)
+ for {
+ select {
+ case <-e.client.Ctx().Done():
+ out <- fmt.Errorf("etcd client context ended")
+ return
+ case <-ctx.Done():
+ out <- ctx.Err()
+ return
+ default:
+ }
+ if e.isConnectedAndHasQuorum(ctx) == nil {
+ return
+ }
+ time.Sleep(100 * time.Millisecond)
+ }
+ }()
+ return out
+}
+
+// Disconnected closes the returned channel when the etcd client is
+// disconnected after being reconnected. Blocks until the etcd client is first
+// connected with the kvstore.
+func (e *etcdClient) Disconnected() <-chan struct{} {
+ <-e.firstSession
+ e.RLock()
+ ch := e.lockSession.Done()
+ e.RUnlock()
+ return ch
+}
+
+func (e *etcdClient) renewLockSession(ctx context.Context) error {
+ if err := e.waitForInitialSession(ctx); err != nil {
+ return err
+ }
+
+ e.RWMutex.RLock()
+ lockSessionChan := e.lockSession.Done()
+ e.RWMutex.RUnlock()
+
+ select {
+ // session has ended
+ case <-lockSessionChan:
+ // controller has stopped or etcd client is closing
+ case <-ctx.Done():
+ return nil
+ }
+ // This is an attempt to avoid concurrent access of a session that was
+ // already expired. It's not perfect as there is still a period between the
+ // e.lockSession.Done() is closed and the e.Lock() is held where parallel go
+ // routines can get a lease ID of an already expired lease.
+ e.Lock()
+
+ if e.lockSessionCancel != nil {
+ e.lockSessionCancel()
+ e.lockSessionCancel = nil
+ }
+
+ // Create a context representing the lifetime of the lock session. It
+ // will timeout if the session creation does not succeed in time and
+ // persists until any of the below conditions are met:
+ // - The parent context is cancelled due to the etcd client closing or
+ // the controller being shut down
+ // - The above call to sessionCancel() cancels the session due to the
+ // session ending and requiring renewal.
+ sessionContext, sessionCancel, sessionSuccess := contexthelpers.NewConditionalTimeoutContext(ctx, statusCheckTimeout)
+ defer close(sessionSuccess)
+
+ newSession, err := concurrency.NewSession(
+ e.client,
+ concurrency.WithTTL(int(defaults.LockLeaseTTL.Seconds())),
+ concurrency.WithContext(sessionContext),
+ )
+ if err != nil {
+ e.UnlockIgnoreTime()
+ return fmt.Errorf("unable to renew etcd lock session: %s", err)
+ }
+ sessionSuccess <- true
+ log.Infof("Got new lock lease ID %x", newSession.Lease())
+
+ e.lockSession = newSession
+ e.lockSessionCancel = sessionCancel
+ e.UnlockIgnoreTime()
+
+ e.getLogger().WithField(fieldSession, newSession).Debug("Renewing etcd lock session")
+
+ if err := e.checkMinVersion(ctx, versionCheckTimeout); err != nil {
+ return err
+ }
+
+ return nil
+}
+
// connectEtcdClient instantiates an etcd client from config (optionally
// overridden by the configuration file at cfgPath), starts establishment of
// the initial lock session, the version check, the status checker and the
// heartbeat watcher in the background, and returns the wrapped
// BackendOperations. Failures to establish the initial session are reported
// asynchronously through errChan and statusCheckErrors.
func connectEtcdClient(ctx context.Context, config *client.Config, cfgPath string, errChan chan error, clientOptions clientOptions, opts *ExtraOptions) (BackendOperations, error) {
	if cfgPath != "" {
		cfg, err := newConfig(cfgPath)
		if err != nil {
			return nil, err
		}
		if cfg.TLS != nil {
			// Reload the client certificate from disk on each TLS
			// handshake so certificate rotation does not require a
			// client restart.
			cfg.TLS.GetClientCertificate, err = getClientCertificateReloader(cfgPath)
			if err != nil {
				return nil, err
			}
		}
		cfg.DialOptions = append(cfg.DialOptions, config.DialOptions...)
		config = cfg
	}

	// Shuffle the order of endpoints to avoid all agents connecting to the
	// same etcd endpoint and to work around etcd client library failover
	// bugs. (https://github.com/etcd-io/etcd/pull/9860)
	if config.Endpoints != nil {
		shuffleEndpoints(config.Endpoints)
	}

	// Set client context so that client can be cancelled from outside
	config.Context = ctx
	// Set DialTimeout to 0, otherwise the creation of a new client will
	// block until DialTimeout is reached or a connection to the server
	// is made.
	config.DialTimeout = 0
	// Ping the server to verify if the server connection is still valid
	config.DialKeepAliveTime = clientOptions.KeepAliveHeartbeat
	// Timeout if the server does not reply within 15 seconds and close the
	// connection. Ideally it should be lower than staleLockTimeout
	config.DialKeepAliveTimeout = clientOptions.KeepAliveTimeout

	// Use the shared etcd client logger to prevent unnecessary allocations.
	config.Logger = etcd3ClientLogger

	c, err := client.New(*config)
	if err != nil {
		return nil, err
	}

	log.WithFields(logrus.Fields{
		"endpoints": config.Endpoints,
		"config":    cfgPath,
	}).Info("Connecting to etcd server...")

	var ls concurrency.Session
	errorChan := make(chan error)

	limiter := ciliumrate.NewAPILimiter(makeSessionName("etcd", opts), ciliumrate.APILimiterParameters{
		RateLimit:        rate.Limit(clientOptions.RateLimit),
		RateBurst:        clientOptions.RateLimit,
		ParallelRequests: clientOptions.MaxInflight,
	}, ciliumratemetrics.APILimiterObserver())

	ec := &etcdClient{
		client:               c,
		config:               config,
		configPath:           cfgPath,
		lockSession:          &ls,
		firstSession:         make(chan struct{}),
		controllers:          controller.NewManager(),
		latestStatusSnapshot: "Waiting for initial connection to be established",
		stopStatusChecker:    make(chan struct{}),
		extraOptions:         opts,
		limiter:              limiter,
		listBatchSize:        clientOptions.ListBatchSize,
		statusCheckErrors:    make(chan error, 128),
	}

	// Fall back to the default lease TTL when none was configured.
	leaseTTL := option.Config.KVstoreLeaseTTL
	if option.Config.KVstoreLeaseTTL == 0 {
		leaseTTL = defaults.KVstoreLeaseTTL
	}

	ec.leaseManager = newEtcdLeaseManager(c, leaseTTL, etcdMaxKeysPerLease, ec.expiredLeaseObserver, ec.getLogger())

	// create session in parallel as this is a blocking operation
	go func() {
		lockSession, err := concurrency.NewSession(c, concurrency.WithTTL(int(defaults.LockLeaseTTL.Seconds())))
		if err != nil {
			errorChan <- err
			close(errorChan)
			return
		}

		// Publish the session under the write lock; readers snapshot it
		// under RLock elsewhere (e.g. LockPath, statusChecker).
		ec.RWMutex.Lock()
		ls = *lockSession
		ec.RWMutex.Unlock()

		log.Infof("Got lock lease ID %x", ls.Lease())
		close(errorChan)
	}()

	handleSessionError := func(err error) {
		ec.RWMutex.Lock()
		ec.sessionErr = err
		ec.RWMutex.Unlock()

		ec.statusLock.Lock()
		ec.latestStatusSnapshot = "Failed to establish initial connection"
		ec.latestErrorStatus = err
		ec.statusLock.Unlock()

		errChan <- err
		ec.statusCheckErrors <- err
	}

	// wait for session to be created also in parallel
	go func() {
		err := func() (err error) {
			select {
			case err = <-errorChan:
				if err != nil {
					return err
				}
			case <-time.After(initialConnectionTimeout):
				return fmt.Errorf("timed out while waiting for etcd session. Ensure that etcd is running on %s", config.Endpoints)
			}

			ec.getLogger().Info("Initial etcd session established")

			if err = ec.checkMinVersion(ctx, versionCheckTimeout); err != nil {
				return fmt.Errorf("unable to validate etcd version: %s", err)
			}

			return nil
		}()

		if err != nil {
			handleSessionError(err)
			close(errChan)
			close(ec.firstSession)
			close(ec.statusCheckErrors)
			return
		}

		close(errChan)
		close(ec.firstSession)

		go ec.statusChecker()

		// Track heartbeat updates written by the leader so quorum loss can
		// be detected when no events arrive for too long.
		watcher := ec.ListAndWatch(ctx, HeartbeatPath, HeartbeatPath, 128)

		for {
			select {
			case _, ok := <-watcher.Events:
				if !ok {
					log.Debug("Stopping heartbeat watcher")
					watcher.Stop()
					return
				}

				// It is tempting to compare against the
				// heartbeat value stored in the key. However,
				// this would require the time on all nodes to
				// be synchronized. Instead, assume current
				// time and print the heartbeat value in debug
				// messages for troubleshooting
				ec.RWMutex.Lock()
				ec.lastHeartbeat = time.Now()
				ec.RWMutex.Unlock()
				log.Debug("Received update notification of heartbeat")
			case <-ctx.Done():
				return
			}
		}
	}()

	ec.controllers.UpdateController(
		makeSessionName(etcdLockSessionRenewNamePrefix, opts),
		controller.ControllerParams{
			Group: etcdLockSessionRenewControllerGroup,
			// Stop controller function when etcd client is terminating
			Context: ec.client.Ctx(),
			DoFunc: func(ctx context.Context) error {
				return ec.renewLockSession(ctx)
			},
			RunInterval: time.Duration(10) * time.Millisecond,
		},
	)

	return ec, nil
}
+
+// makeSessionName builds up a session/locksession controller name
+// clusterName is expected to be empty for main kvstore connection
+func makeSessionName(sessionPrefix string, opts *ExtraOptions) string {
+ if opts != nil && opts.ClusterName != "" {
+ return sessionPrefix + "-" + opts.ClusterName
+ }
+ return sessionPrefix
+}
+
+func getEPVersion(ctx context.Context, c client.Maintenance, etcdEP string, timeout time.Duration) (semver.Version, error) {
+ ctxTimeout, cancel := context.WithTimeout(ctx, timeout)
+ defer cancel()
+ sr, err := c.Status(ctxTimeout, etcdEP)
+ if err != nil {
+ return semver.Version{}, Hint(err)
+ }
+ v, err := versioncheck.Version(sr.Version)
+ if err != nil {
+ return semver.Version{}, fmt.Errorf("error parsing server version %q: %s", sr.Version, Hint(err))
+ }
+ return v, nil
+}
+
+func (e *etcdClient) sessionError() (err error) {
+ e.RWMutex.RLock()
+ err = e.sessionErr
+ e.RWMutex.RUnlock()
+ return
+}
+
+// checkMinVersion checks the minimal version running on etcd cluster. This
+// function should be run whenever the etcd client is connected for the first
+// time and whenever the session is renewed.
+func (e *etcdClient) checkMinVersion(ctx context.Context, timeout time.Duration) error {
+ eps := e.client.Endpoints()
+
+ for _, ep := range eps {
+ v, err := getEPVersion(ctx, e.client.Maintenance, ep, timeout)
+ if err != nil {
+ e.getLogger().WithError(Hint(err)).WithField(fieldEtcdEndpoint, ep).
+ Warn("Unable to verify version of etcd endpoint")
+ continue
+ }
+
+ if !minRequiredVersion(v) {
+ return fmt.Errorf("minimal etcd version not met in %q, required: %s, found: %s",
+ ep, minRequiredVersionStr, v.String())
+ }
+
+ e.getLogger().WithFields(logrus.Fields{
+ fieldEtcdEndpoint: ep,
+ "version": v,
+ }).Info("Successfully verified version of etcd endpoint")
+ }
+
+ if len(eps) == 0 {
+ e.getLogger().Warn("Minimal etcd version unknown: No etcd endpoints available")
+ }
+
+ return nil
+}
+
+func (e *etcdClient) waitForInitialSession(ctx context.Context) error {
+ select {
+ case <-e.firstSession:
+ if err := e.sessionError(); err != nil {
+ return err
+ }
+ case <-ctx.Done():
+ return fmt.Errorf("interrupt while waiting for initial session to be established: %w", ctx.Err())
+ }
+
+ return nil
+}
+
// LockPath acquires a distributed lock on the given path and returns a
// KVLocker that must be used to release it. It blocks until the lock is
// acquired, the one-minute timeout expires, or the context is cancelled.
func (e *etcdClient) LockPath(ctx context.Context, path string) (KVLocker, error) {
	if err := e.waitForInitialSession(ctx); err != nil {
		return nil, err
	}

	// Create the context first so that if a connectivity issue causes the
	// RLock acquisition below to block, this timeout will run concurrently
	// with the timeouts in renewSession() rather than running serially.
	ctx, cancel := context.WithTimeout(ctx, time.Minute)
	defer cancel()

	// Snapshot the session and its lease under the read lock so that a
	// concurrent session renewal cannot race with mutex creation.
	e.RLock()
	mu := concurrency.NewMutex(e.lockSession, path)
	leaseID := e.lockSession.Lease()
	e.RUnlock()

	err := mu.Lock(ctx)
	if err != nil {
		// The lock attempt may have failed because the session lease
		// expired; checkLockSession can trigger a renewal in that case.
		e.checkLockSession(err, leaseID)
		return nil, Hint(err)
	}

	return &etcdMutex{mutex: mu}, nil
}
+
+func (e *etcdClient) DeletePrefix(ctx context.Context, path string) (err error) {
+ defer func() {
+ Trace("DeletePrefix", err, logrus.Fields{fieldPrefix: path})
+ }()
+ lr, err := e.limiter.Wait(ctx)
+ if err != nil {
+ return Hint(err)
+ }
+
+ defer func(duration *spanstat.SpanStat) {
+ increaseMetric(path, metricDelete, "DeletePrefix", duration.EndError(err).Total(), err)
+ }(spanstat.Start())
+
+ _, err = e.client.Delete(ctx, path, client.WithPrefix())
+ // Using lr.Error for convenience, as it matches lr.Done() when err is nil
+ lr.Error(err)
+
+ if err == nil {
+ e.leaseManager.ReleasePrefix(path)
+ }
+
+ return Hint(err)
+}
+
// Watch starts watching for changes in a prefix. It first lists the existing
// keys (emitting Create/Modify events plus one ListDone marker), then follows
// the prefix via an etcd watch, re-listing from scratch whenever the watch
// fails (e.g. on a compacted revision). It blocks until the watcher is
// stopped or the context/client terminates; w.Events is closed on return.
func (e *etcdClient) Watch(ctx context.Context, w *Watcher) {
	// localCache tracks which keys have been emitted so that a re-list can
	// synthesize Delete events for keys that disappeared in between.
	localCache := watcherCache{}
	listSignalSent := false

	defer func() {
		close(w.Events)
		w.stopWait.Done()

		// The watch might be aborted by closing
		// the context instead of calling
		// w.Stop() from outside. In that case
		// we make sure to close everything and
		// as this uses sync.Once it can be
		// run multiple times (if that's the case).
		w.Stop()
	}()

	scopedLog := e.getLogger().WithFields(logrus.Fields{
		fieldWatcher: w,
		fieldPrefix:  w.Prefix,
	})

	err := <-e.Connected(ctx)
	if err != nil {
		// The context ended or the etcd client was closed
		// before connectivity was achieved
		return
	}

	// errLimiter is used to rate limit the retry of the first Get request in case an error
	// has occurred, to prevent overloading the etcd server due to the more aggressive
	// default rate limiter.
	errLimiter := backoff.Exponential{
		Name: "etcd-list-before-watch-error",
		Min:  50 * time.Millisecond,
		Max:  1 * time.Minute,
	}

	if e.extraOptions != nil {
		errLimiter.NodeManager = backoff.NewNodeManager(e.extraOptions.ClusterSizeDependantInterval)
	}

reList:
	for {
		select {
		case <-e.client.Ctx().Done():
			return
		case <-ctx.Done():
			return
		default:
		}

		lr, err := e.limiter.Wait(ctx)
		if err != nil {
			continue
		}
		kvs, revision, err := e.paginatedList(ctx, scopedLog, w.Prefix)
		if err != nil {
			lr.Error(err)
			scopedLog.WithError(Hint(err)).Warn("Unable to list keys before starting watcher")
			errLimiter.Wait(ctx)
			continue
		}
		lr.Done()
		errLimiter.Reset()

		for _, key := range kvs {
			// Keys already seen in a previous round are modifications,
			// not creations.
			t := EventTypeCreate
			if localCache.Exists(key.Key) {
				t = EventTypeModify
			}

			localCache.MarkInUse(key.Key)
			scopedLog.Debugf("Emitting list result as %s event for %s=%s", t, key.Key, key.Value)

			queueStart := spanstat.Start()
			w.Events <- KeyValueEvent{
				Key:   string(key.Key),
				Value: key.Value,
				Typ:   t,
			}
			trackEventQueued(string(key.Key), t, queueStart.End(true).Total())
		}

		nextRev := revision + 1

		// Send out deletion events for all keys that were deleted
		// between our last known revision and the latest revision
		// received via Get
		localCache.RemoveDeleted(func(k string) {
			event := KeyValueEvent{
				Key: k,
				Typ: EventTypeDelete,
			}

			scopedLog.Debugf("Emitting EventTypeDelete event for %s", k)
			queueStart := spanstat.Start()
			w.Events <- event
			trackEventQueued(k, EventTypeDelete, queueStart.End(true).Total())
		})

		// Only send the list signal once
		if !listSignalSent {
			w.Events <- KeyValueEvent{Typ: EventTypeListDone}
			listSignalSent = true
		}

	recreateWatcher:
		scopedLog.WithField(fieldRev, nextRev).Debug("Starting to watch a prefix")

		lr, err = e.limiter.Wait(ctx)
		if err != nil {
			select {
			case <-e.client.Ctx().Done():
				return
			case <-ctx.Done():
				return
			default:
				goto recreateWatcher
			}
		}

		// Resume watching from the first revision not yet observed.
		etcdWatch := e.client.Watch(client.WithRequireLeader(ctx), w.Prefix,
			client.WithPrefix(), client.WithRev(nextRev))
		lr.Done()

		for {
			select {
			case <-e.client.Ctx().Done():
				return
			case <-ctx.Done():
				return
			case <-w.stopWatch:
				return
			case r, ok := <-etcdWatch:
				if !ok {
					time.Sleep(50 * time.Millisecond)
					goto recreateWatcher
				}

				scopedLog := scopedLog.WithField(fieldRev, r.Header.Revision)

				if err := r.Err(); err != nil {
					// We tried to watch on a compacted
					// revision that may no longer exist,
					// recreate the watcher and try to
					// watch on the next possible revision
					if errors.Is(err, v3rpcErrors.ErrCompacted) {
						scopedLog.WithError(Hint(err)).Debug("Tried watching on compacted revision")
					}

					// mark all local keys in state for
					// deletion unless the upcoming GET
					// marks them alive
					localCache.MarkAllForDeletion()

					goto reList
				}

				nextRev = r.Header.Revision + 1
				scopedLog.Debugf("Received event from etcd: %+v", r)

				for _, ev := range r.Events {
					event := KeyValueEvent{
						Key:   string(ev.Kv.Key),
						Value: ev.Kv.Value,
					}

					switch {
					case ev.Type == client.EventTypeDelete:
						event.Typ = EventTypeDelete
						localCache.RemoveKey(ev.Kv.Key)
					case ev.IsCreate():
						event.Typ = EventTypeCreate
						localCache.MarkInUse(ev.Kv.Key)
					default:
						event.Typ = EventTypeModify
						localCache.MarkInUse(ev.Kv.Key)
					}

					scopedLog.Debugf("Emitting %s event for %s=%s", event.Typ, event.Key, event.Value)

					queueStart := spanstat.Start()
					w.Events <- event
					trackEventQueued(string(ev.Kv.Key), event.Typ, queueStart.End(true).Total())
				}
			}
		}
	}
}
+
// paginatedList lists all keys under prefix in batches of e.listBatchSize,
// pinning every page after the first to the revision of the first response so
// the result is a consistent snapshot. It returns the collected key-value
// pairs and the revision they were read at.
func (e *etcdClient) paginatedList(ctx context.Context, log *logrus.Entry, prefix string) (kvs []*mvccpb.KeyValue, revision int64, err error) {
	start, end := prefix, client.GetPrefixRangeEnd(prefix)

	for {
		// revision is 0 on the first iteration, which etcd treats as
		// "latest"; subsequent iterations reuse the first reply's revision.
		res, err := e.client.Get(ctx, start, client.WithRange(end),
			client.WithSort(client.SortByKey, client.SortAscend),
			client.WithRev(revision), client.WithSerializable(),
			client.WithLimit(int64(e.listBatchSize)),
		)
		if err != nil {
			return nil, 0, err
		}

		log.WithFields(logrus.Fields{
			fieldNumEntries:       len(res.Kvs),
			fieldRemainingEntries: res.Count - int64(len(res.Kvs)),
		}).Debug("Received list response from etcd")

		if kvs == nil {
			kvs = make([]*mvccpb.KeyValue, 0, res.Count)
		}

		kvs = append(kvs, res.Kvs...)

		revision = res.Header.Revision
		if !res.More || len(res.Kvs) == 0 {
			return kvs, revision, nil
		}

		// Resume immediately after the last returned key: "\x00" is the
		// smallest possible suffix, so no key is skipped or repeated.
		start = string(res.Kvs[len(res.Kvs)-1].Key) + "\x00"
	}
}
+
+func (e *etcdClient) determineEndpointStatus(ctx context.Context, endpointAddress string) (string, error) {
+ ctxTimeout, cancel := context.WithTimeout(ctx, statusCheckTimeout)
+ defer cancel()
+
+ e.getLogger().Debugf("Checking status to etcd endpoint %s", endpointAddress)
+
+ status, err := e.client.Status(ctxTimeout, endpointAddress)
+ if err != nil {
+ return fmt.Sprintf("%s - %s", endpointAddress, err), Hint(err)
+ }
+
+ str := fmt.Sprintf("%s - %s", endpointAddress, status.Version)
+ if status.Header.MemberId == status.Leader {
+ str += " (Leader)"
+ }
+
+ return str, nil
+}
+
+func (e *etcdClient) statusChecker() {
+ ctx := context.Background()
+
+ consecutiveQuorumErrors := 0
+
+ statusTimer, statusTimerDone := inctimer.New()
+ defer statusTimerDone()
+
+ for {
+ newStatus := []string{}
+ ok := 0
+
+ quorumError := e.isConnectedAndHasQuorum(ctx)
+
+ endpoints := e.client.Endpoints()
+ for _, ep := range endpoints {
+ st, err := e.determineEndpointStatus(ctx, ep)
+ if err == nil {
+ ok++
+ }
+
+ newStatus = append(newStatus, st)
+ }
+
+ allConnected := len(endpoints) == ok
+
+ e.RWMutex.RLock()
+ lockSessionLeaseID := e.lockSession.Lease()
+ lastHeartbeat := e.lastHeartbeat
+ e.RWMutex.RUnlock()
+
+ if heartbeatDelta := time.Since(lastHeartbeat); !lastHeartbeat.IsZero() && heartbeatDelta > 2*HeartbeatWriteInterval {
+ recordQuorumError("no event received")
+ quorumError = fmt.Errorf("%s since last heartbeat update has been received", heartbeatDelta)
+ }
+
+ quorumString := "true"
+ if quorumError != nil {
+ quorumString = quorumError.Error()
+ consecutiveQuorumErrors++
+ quorumString += fmt.Sprintf(", consecutive-errors=%d", consecutiveQuorumErrors)
+ } else {
+ consecutiveQuorumErrors = 0
+ }
+
+ e.statusLock.Lock()
+
+ switch {
+ case consecutiveQuorumErrors > option.Config.KVstoreMaxConsecutiveQuorumErrors:
+ e.latestErrorStatus = fmt.Errorf("quorum check failed %d times in a row: %s",
+ consecutiveQuorumErrors, quorumError)
+ e.latestStatusSnapshot = e.latestErrorStatus.Error()
+ case len(endpoints) > 0 && ok == 0:
+ e.latestErrorStatus = fmt.Errorf("not able to connect to any etcd endpoints")
+ e.latestStatusSnapshot = e.latestErrorStatus.Error()
+ default:
+ e.latestErrorStatus = nil
+ e.latestStatusSnapshot = fmt.Sprintf("etcd: %d/%d connected, leases=%d, lock lease-ID=%x, has-quorum=%s: %s",
+ ok, len(endpoints), e.leaseManager.TotalLeases(), lockSessionLeaseID, quorumString, strings.Join(newStatus, "; "))
+ }
+
+ e.statusLock.Unlock()
+ if e.latestErrorStatus != nil {
+ select {
+ case e.statusCheckErrors <- e.latestErrorStatus:
+ default:
+ // Channel's buffer is full, skip sending errors to the channel but log warnings instead
+ log.WithError(e.latestErrorStatus).
+ Warning("Status check error channel is full, dropping this error")
+ }
+ }
+
+ select {
+ case <-e.stopStatusChecker:
+ close(e.statusCheckErrors)
+ return
+ case <-statusTimer.After(e.extraOptions.StatusCheckInterval(allConnected)):
+ }
+ }
+}
+
+func (e *etcdClient) Status() (string, error) {
+ e.statusLock.RLock()
+ defer e.statusLock.RUnlock()
+
+ return e.latestStatusSnapshot, Hint(e.latestErrorStatus)
+}
+
+// GetIfLocked returns value of key if the client is still holding the given lock.
+func (e *etcdClient) GetIfLocked(ctx context.Context, key string, lock KVLocker) (bv []byte, err error) {
+ defer func() {
+ Trace("GetIfLocked", err, logrus.Fields{fieldKey: key, fieldValue: string(bv)})
+ }()
+ lr, err := e.limiter.Wait(ctx)
+ if err != nil {
+ return nil, Hint(err)
+ }
+ defer func(duration *spanstat.SpanStat) {
+ increaseMetric(key, metricRead, "GetLocked", duration.EndError(err).Total(), err)
+ }(spanstat.Start())
+
+ opGet := client.OpGet(key)
+ cmp := lock.Comparator().(client.Cmp)
+ txnReply, err := e.client.Txn(ctx).If(cmp).Then(opGet).Commit()
+ if err == nil && !txnReply.Succeeded {
+ err = ErrLockLeaseExpired
+ }
+
+ if err != nil {
+ lr.Error(err)
+ return nil, Hint(err)
+ }
+
+ lr.Done()
+ getR := txnReply.Responses[0].GetResponseRange()
+ // RangeResponse
+ if getR.Count == 0 {
+ return nil, nil
+ }
+ return getR.Kvs[0].Value, nil
+}
+
+// Get returns value of key
+func (e *etcdClient) Get(ctx context.Context, key string) (bv []byte, err error) {
+ defer func() {
+ Trace("Get", err, logrus.Fields{fieldKey: key, fieldValue: string(bv)})
+ }()
+ lr, err := e.limiter.Wait(ctx)
+ if err != nil {
+ return nil, Hint(err)
+ }
+ defer func(duration *spanstat.SpanStat) {
+ increaseMetric(key, metricRead, "Get", duration.EndError(err).Total(), err)
+ }(spanstat.Start())
+
+ getR, err := e.client.Get(ctx, key)
+ if err != nil {
+ lr.Error(err)
+ return nil, Hint(err)
+ }
+ lr.Done()
+
+ if getR.Count == 0 {
+ return nil, nil
+ }
+ return getR.Kvs[0].Value, nil
+}
+
+// GetPrefixIfLocked returns the first key which matches the prefix and its value if the client is still holding the given lock.
+func (e *etcdClient) GetPrefixIfLocked(ctx context.Context, prefix string, lock KVLocker) (k string, bv []byte, err error) {
+ defer func() {
+ Trace("GetPrefixIfLocked", err, logrus.Fields{fieldPrefix: prefix, fieldKey: k, fieldValue: string(bv)})
+ }()
+ lr, err := e.limiter.Wait(ctx)
+ if err != nil {
+ return "", nil, Hint(err)
+ }
+ defer func(duration *spanstat.SpanStat) {
+ increaseMetric(prefix, metricRead, "GetPrefixLocked", duration.EndError(err).Total(), err)
+ }(spanstat.Start())
+
+ opGet := client.OpGet(prefix, client.WithPrefix(), client.WithLimit(1))
+ cmp := lock.Comparator().(client.Cmp)
+ txnReply, err := e.client.Txn(ctx).If(cmp).Then(opGet).Commit()
+ if err == nil && !txnReply.Succeeded {
+ err = ErrLockLeaseExpired
+ }
+
+ if err != nil {
+ lr.Error(err)
+ return "", nil, Hint(err)
+ }
+ lr.Done()
+
+ getR := txnReply.Responses[0].GetResponseRange()
+
+ if getR.Count == 0 {
+ return "", nil, nil
+ }
+ return string(getR.Kvs[0].Key), getR.Kvs[0].Value, nil
+}
+
+// GetPrefix returns the first key which matches the prefix and its value
+func (e *etcdClient) GetPrefix(ctx context.Context, prefix string) (k string, bv []byte, err error) {
+ defer func() {
+ Trace("GetPrefix", err, logrus.Fields{fieldPrefix: prefix, fieldKey: k, fieldValue: string(bv)})
+ }()
+ lr, err := e.limiter.Wait(ctx)
+ if err != nil {
+ return "", nil, Hint(err)
+ }
+ defer func(duration *spanstat.SpanStat) {
+ increaseMetric(prefix, metricRead, "GetPrefix", duration.EndError(err).Total(), err)
+ }(spanstat.Start())
+
+ getR, err := e.client.Get(ctx, prefix, client.WithPrefix(), client.WithLimit(1))
+ if err != nil {
+ lr.Error(err)
+ return "", nil, Hint(err)
+ }
+ lr.Done()
+
+ if getR.Count == 0 {
+ return "", nil, nil
+ }
+ return string(getR.Kvs[0].Key), getR.Kvs[0].Value, nil
+}
+
+// Set sets value of key
+func (e *etcdClient) Set(ctx context.Context, key string, value []byte) (err error) {
+ defer func() {
+ Trace("Set", err, logrus.Fields{fieldKey: key, fieldValue: string(value)})
+ }()
+ lr, err := e.limiter.Wait(ctx)
+ if err != nil {
+ return Hint(err)
+ }
+ defer func(duration *spanstat.SpanStat) {
+ increaseMetric(key, metricSet, "Set", duration.EndError(err).Total(), err)
+ }(spanstat.Start())
+
+ _, err = e.client.Put(ctx, key, string(value))
+ // Using lr.Error for convenience, as it matches lr.Done() when err is nil
+ lr.Error(err)
+ return Hint(err)
+}
+
+// DeleteIfLocked deletes a key if the client is still holding the given lock.
+func (e *etcdClient) DeleteIfLocked(ctx context.Context, key string, lock KVLocker) (err error) {
+ defer func() {
+ Trace("DeleteIfLocked", err, logrus.Fields{fieldKey: key})
+ }()
+ lr, err := e.limiter.Wait(ctx)
+ if err != nil {
+ return Hint(err)
+ }
+ defer func(duration *spanstat.SpanStat) {
+ increaseMetric(key, metricDelete, "DeleteLocked", duration.EndError(err).Total(), err)
+ }(spanstat.Start())
+
+ opDel := client.OpDelete(key)
+ cmp := lock.Comparator().(client.Cmp)
+ txnReply, err := e.client.Txn(ctx).If(cmp).Then(opDel).Commit()
+ if err == nil && !txnReply.Succeeded {
+ err = ErrLockLeaseExpired
+ }
+ if err == nil {
+ e.leaseManager.Release(key)
+ }
+
+ // Using lr.Error for convenience, as it matches lr.Done() when err is nil
+ lr.Error(err)
+ return Hint(err)
+}
+
+// Delete deletes a key
+func (e *etcdClient) Delete(ctx context.Context, key string) (err error) {
+ defer func() {
+ Trace("Delete", err, logrus.Fields{fieldKey: key})
+ }()
+ lr, err := e.limiter.Wait(ctx)
+ if err != nil {
+ return Hint(err)
+ }
+ defer func(duration *spanstat.SpanStat) {
+ increaseMetric(key, metricDelete, "Delete", duration.EndError(err).Total(), err)
+ }(spanstat.Start())
+
+ _, err = e.client.Delete(ctx, key)
+ // Using lr.Error for convenience, as it matches lr.Done() when err is nil
+ lr.Error(err)
+
+ if err == nil {
+ e.leaseManager.Release(key)
+ }
+
+ return Hint(err)
+}
+
+func (e *etcdClient) createOpPut(key string, value []byte, leaseID client.LeaseID) *client.Op {
+ if leaseID != 0 {
+ op := client.OpPut(key, string(value), client.WithLease(leaseID))
+ return &op
+ }
+
+ op := client.OpPut(key, string(value))
+ return &op
+}
+
+// UpdateIfLocked updates a key if the client is still holding the given lock.
+func (e *etcdClient) UpdateIfLocked(ctx context.Context, key string, value []byte, lease bool, lock KVLocker) (err error) {
+ defer func() {
+ Trace("UpdateIfLocked", err, logrus.Fields{fieldKey: key, fieldValue: string(value), fieldAttachLease: lease})
+ }()
+ if err := e.waitForInitialSession(ctx); err != nil {
+ return err
+ }
+ var leaseID client.LeaseID
+ if lease {
+ leaseID, err = e.leaseManager.GetLeaseID(ctx, key)
+ if err != nil {
+ return Hint(err)
+ }
+ }
+ lr, err := e.limiter.Wait(ctx)
+ if err != nil {
+ return Hint(err)
+ }
+ defer func(duration *spanstat.SpanStat) {
+ increaseMetric(key, metricSet, "UpdateIfLocked", duration.EndError(err).Total(), err)
+ }(spanstat.Start())
+
+ var txnReply *client.TxnResponse
+ opPut := client.OpPut(key, string(value), client.WithLease(leaseID))
+ cmp := lock.Comparator().(client.Cmp)
+ txnReply, err = e.client.Txn(ctx).If(cmp).Then(opPut).Commit()
+ e.leaseManager.CancelIfExpired(err, leaseID)
+
+ if err == nil && !txnReply.Succeeded {
+ err = ErrLockLeaseExpired
+ }
+
+ // Using lr.Error for convenience, as it matches lr.Done() when err is nil
+ lr.Error(err)
+ return Hint(err)
+}
+
+// Update creates or updates a key
+func (e *etcdClient) Update(ctx context.Context, key string, value []byte, lease bool) (err error) {
+ defer func() {
+ Trace("Update", err, logrus.Fields{fieldKey: key, fieldValue: string(value), fieldAttachLease: lease})
+ }()
+ if err = e.waitForInitialSession(ctx); err != nil {
+ return
+ }
+ var leaseID client.LeaseID
+ if lease {
+ leaseID, err = e.leaseManager.GetLeaseID(ctx, key)
+ if err != nil {
+ return Hint(err)
+ }
+ }
+ lr, err := e.limiter.Wait(ctx)
+ if err != nil {
+ return Hint(err)
+ }
+ defer func(duration *spanstat.SpanStat) {
+ increaseMetric(key, metricSet, "Update", duration.EndError(err).Total(), err)
+ }(spanstat.Start())
+
+ _, err = e.client.Put(ctx, key, string(value), client.WithLease(leaseID))
+ e.leaseManager.CancelIfExpired(err, leaseID)
+
+ // Using lr.Error for convenience, as it matches lr.Done() when err is nil
+ lr.Error(err)
+ return Hint(err)
+}
+
// UpdateIfDifferentIfLocked updates a key if the value is different and if the client is still holding the given lock.
func (e *etcdClient) UpdateIfDifferentIfLocked(ctx context.Context, key string, value []byte, lease bool, lock KVLocker) (recreated bool, err error) {
	defer func() {
		Trace("UpdateIfDifferentIfLocked", err, logrus.Fields{fieldKey: key, fieldValue: value, fieldAttachLease: lease, "recreated": recreated})
	}()
	if err = e.waitForInitialSession(ctx); err != nil {
		return false, err
	}
	lr, err := e.limiter.Wait(ctx)
	if err != nil {
		return false, Hint(err)
	}
	duration := spanstat.Start()

	// Read the current value guarded by the lock comparator so the read
	// only succeeds while the lock lease is still valid.
	cnds := lock.Comparator().(client.Cmp)
	txnresp, err := e.client.Txn(ctx).If(cnds).Then(client.OpGet(key)).Commit()
	// Using lr.Error for convenience, as it matches lr.Done() when err is nil
	lr.Error(err)
	increaseMetric(key, metricRead, "Get", duration.EndError(err).Total(), err)

	// On error, attempt update blindly
	if err != nil {
		return true, e.UpdateIfLocked(ctx, key, value, lease, lock)
	}

	if !txnresp.Succeeded {
		return false, ErrLockLeaseExpired
	}

	getR := txnresp.Responses[0].GetResponseRange()
	// Key does not exist yet: write it.
	if getR.Count == 0 {
		return true, e.UpdateIfLocked(ctx, key, value, lease, lock)
	}

	// Rewrite when the key is no longer attached to the lease we manage,
	// so the lease association is re-established.
	if lease && !e.leaseManager.KeyHasLease(key, client.LeaseID(getR.Kvs[0].Lease)) {
		return true, e.UpdateIfLocked(ctx, key, value, lease, lock)
	}
	// if value is not equal then update.
	if !bytes.Equal(getR.Kvs[0].Value, value) {
		return true, e.UpdateIfLocked(ctx, key, value, lease, lock)
	}

	return false, nil
}
+
+// UpdateIfDifferent updates a key if the value is different
+func (e *etcdClient) UpdateIfDifferent(ctx context.Context, key string, value []byte, lease bool) (recreated bool, err error) {
+ defer func() {
+ Trace("UpdateIfDifferent", err, logrus.Fields{fieldKey: key, fieldValue: value, fieldAttachLease: lease, "recreated": recreated})
+ }()
+ if err = e.waitForInitialSession(ctx); err != nil {
+ return false, err
+ }
+ lr, err := e.limiter.Wait(ctx)
+ if err != nil {
+ return false, Hint(err)
+ }
+ duration := spanstat.Start()
+
+ getR, err := e.client.Get(ctx, key)
+ // Using lr.Error for convenience, as it matches lr.Done() when err is nil
+ lr.Error(err)
+ increaseMetric(key, metricRead, "Get", duration.EndError(err).Total(), err)
+ // On error, attempt update blindly
+ if err != nil || getR.Count == 0 {
+ return true, e.Update(ctx, key, value, lease)
+ }
+ if lease && !e.leaseManager.KeyHasLease(key, client.LeaseID(getR.Kvs[0].Lease)) {
+ return true, e.Update(ctx, key, value, lease)
+ }
+ // if value is not equal then update.
+ if !bytes.Equal(getR.Kvs[0].Value, value) {
+ return true, e.Update(ctx, key, value, lease)
+ }
+
+ return false, nil
+}
+
// CreateOnlyIfLocked atomically creates a key if the client is still holding the given lock or fails if it already exists
func (e *etcdClient) CreateOnlyIfLocked(ctx context.Context, key string, value []byte, lease bool, lock KVLocker) (success bool, err error) {
	defer func() {
		Trace("CreateOnlyIfLocked", err, logrus.Fields{fieldKey: key, fieldValue: value, fieldAttachLease: lease, "success": success})
	}()
	var leaseID client.LeaseID
	if lease {
		leaseID, err = e.leaseManager.GetLeaseID(ctx, key)
		if err != nil {
			return false, Hint(err)
		}
	}
	lr, err := e.limiter.Wait(ctx)
	if err != nil {
		return false, Hint(err)
	}
	duration := spanstat.Start()

	req := e.createOpPut(key, value, leaseID)
	// Both conditions must hold: the key must not exist yet (version 0)
	// and the lock lease must still be valid.
	cnds := []client.Cmp{
		client.Compare(client.Version(key), "=", 0),
		lock.Comparator().(client.Cmp),
	}

	// We need to do a get in the else of the txn to detect if the lock is still
	// valid or not.
	opGets := []client.Op{
		client.OpGet(key),
	}
	txnresp, err := e.client.Txn(ctx).If(cnds...).Then(*req).Else(opGets...).Commit()
	increaseMetric(key, metricSet, "CreateOnlyLocked", duration.EndError(err).Total(), err)
	if err != nil {
		lr.Error(err)
		e.leaseManager.CancelIfExpired(err, leaseID)
		return false, Hint(err)
	}
	lr.Done()

	// The txn can fail for the following reasons:
	//  - Key version is not zero;
	//  - Lock does not exist or is expired.
	// For both of those cases, the key that we are comparing might or not
	// exist, so we have:
	//  A - Key does not exist and lock does not exist => ErrLockLeaseExpired
	//  B - Key does not exist and lock exist => txn should succeed
	//  C - Key does exist, version is == 0 and lock does not exist => ErrLockLeaseExpired
	//  D - Key does exist, version is != 0 and lock does not exist => ErrLockLeaseExpired
	//  E - Key does exist, version is == 0 and lock does exist => txn should succeed
	//  F - Key does exist, version is != 0 and lock does exist => txn fails but returned is nil!

	if !txnresp.Succeeded {
		// case F
		if len(txnresp.Responses[0].GetResponseRange().Kvs) != 0 &&
			txnresp.Responses[0].GetResponseRange().Kvs[0].Version != 0 {
			return false, nil
		}

		// case A, C and D
		return false, ErrLockLeaseExpired
	}

	// case B and E
	return true, nil
}
+
+// CreateOnly creates a key with the value and will fail if the key already exists
+func (e *etcdClient) CreateOnly(ctx context.Context, key string, value []byte, lease bool) (success bool, err error) {
+ defer func() {
+ Trace("CreateOnly", err, logrus.Fields{fieldKey: key, fieldValue: value, fieldAttachLease: lease, "success": success})
+ }()
+ var leaseID client.LeaseID
+ if lease {
+ leaseID, err = e.leaseManager.GetLeaseID(ctx, key)
+ if err != nil {
+ return false, Hint(err)
+ }
+ }
+ lr, err := e.limiter.Wait(ctx)
+ if err != nil {
+ return false, Hint(err)
+ }
+ defer func(duration *spanstat.SpanStat) {
+ increaseMetric(key, metricSet, "CreateOnly", duration.EndError(err).Total(), err)
+ }(spanstat.Start())
+
+ req := e.createOpPut(key, value, leaseID)
+ cond := client.Compare(client.Version(key), "=", 0)
+
+ txnresp, err := e.client.Txn(ctx).If(cond).Then(*req).Commit()
+
+ if err != nil {
+ lr.Error(err)
+ e.leaseManager.CancelIfExpired(err, leaseID)
+ return false, Hint(err)
+ }
+
+ lr.Done()
+ return txnresp.Succeeded, nil
+}
+
+// CreateIfExists creates a key with the value only if key condKey exists
+func (e *etcdClient) CreateIfExists(ctx context.Context, condKey, key string, value []byte, lease bool) (err error) {
+ defer func() {
+ Trace("CreateIfExists", err, logrus.Fields{fieldKey: key, fieldValue: string(value), fieldCondition: condKey, fieldAttachLease: lease})
+ }()
+ var leaseID client.LeaseID
+ if lease {
+ leaseID, err = e.leaseManager.GetLeaseID(ctx, key)
+ if err != nil {
+ return Hint(err)
+ }
+ }
+ lr, err := e.limiter.Wait(ctx)
+ if err != nil {
+ return Hint(err)
+ }
+ duration := spanstat.Start()
+
+ req := e.createOpPut(key, value, leaseID)
+ cond := client.Compare(client.Version(condKey), "!=", 0)
+ txnresp, err := e.client.Txn(ctx).If(cond).Then(*req).Commit()
+
+ increaseMetric(key, metricSet, "CreateIfExists", duration.EndError(err).Total(), err)
+ if err != nil {
+ lr.Error(err)
+ e.leaseManager.CancelIfExpired(err, leaseID)
+ return Hint(err)
+ }
+ lr.Done()
+
+ if !txnresp.Succeeded {
+ return fmt.Errorf("create was unsuccessful")
+ }
+
+ return nil
+}
+
+// FIXME: When we rebase to etcd 3.3
+//
+// DeleteOnZeroCount deletes the key if no matching keys for prefix exist
+//func (e *etcdClient) DeleteOnZeroCount(key, prefix string) error {
+// txnresp, err := e.client.Txn(ctx.TODO()).
+// If(client.Compare(client.Version(prefix).WithPrefix(), "=", 0)).
+// Then(client.OpDelete(key)).
+// Commit()
+// if err != nil {
+// return err
+// }
+//
+// if txnresp.Succeeded == false {
+// return fmt.Errorf("delete was unsuccessful")
+// }
+//
+// return nil
+//}
+
+// ListPrefixIfLocked returns a list of keys matching the prefix only if the client is still holding the given lock.
+func (e *etcdClient) ListPrefixIfLocked(ctx context.Context, prefix string, lock KVLocker) (v KeyValuePairs, err error) {
+ defer func() {
+ Trace("ListPrefixIfLocked", err, logrus.Fields{fieldPrefix: prefix, fieldNumEntries: len(v)})
+ }()
+ lr, err := e.limiter.Wait(ctx)
+ if err != nil {
+ return nil, Hint(err)
+ }
+ defer func(duration *spanstat.SpanStat) {
+ increaseMetric(prefix, metricRead, "ListPrefixLocked", duration.EndError(err).Total(), err)
+ }(spanstat.Start())
+
+ opGet := client.OpGet(prefix, client.WithPrefix())
+ cmp := lock.Comparator().(client.Cmp)
+ txnReply, err := e.client.Txn(ctx).If(cmp).Then(opGet).Commit()
+ if err == nil && !txnReply.Succeeded {
+ err = ErrLockLeaseExpired
+ }
+ if err != nil {
+ lr.Error(err)
+ return nil, Hint(err)
+ }
+ lr.Done()
+ getR := txnReply.Responses[0].GetResponseRange()
+
+ pairs := KeyValuePairs(make(map[string]Value, getR.Count))
+ for i := int64(0); i < getR.Count; i++ {
+ pairs[string(getR.Kvs[i].Key)] = Value{
+ Data: getR.Kvs[i].Value,
+ ModRevision: uint64(getR.Kvs[i].ModRevision),
+ }
+
+ }
+
+ return pairs, nil
+}
+
+// ListPrefix returns a map of matching keys
+func (e *etcdClient) ListPrefix(ctx context.Context, prefix string) (v KeyValuePairs, err error) {
+ defer func() {
+ Trace("ListPrefix", err, logrus.Fields{fieldPrefix: prefix, fieldNumEntries: len(v)})
+ }()
+ lr, err := e.limiter.Wait(ctx)
+ if err != nil {
+ return nil, Hint(err)
+ }
+ defer func(duration *spanstat.SpanStat) {
+ increaseMetric(prefix, metricRead, "ListPrefix", duration.EndError(err).Total(), err)
+ }(spanstat.Start())
+
+ getR, err := e.client.Get(ctx, prefix, client.WithPrefix())
+ if err != nil {
+ lr.Error(err)
+ return nil, Hint(err)
+ }
+ lr.Done()
+
+ pairs := KeyValuePairs(make(map[string]Value, getR.Count))
+ for i := int64(0); i < getR.Count; i++ {
+ pairs[string(getR.Kvs[i].Key)] = Value{
+ Data: getR.Kvs[i].Value,
+ ModRevision: uint64(getR.Kvs[i].ModRevision),
+ LeaseID: getR.Kvs[i].Lease,
+ }
+
+ }
+
+ return pairs, nil
+}
+
+// Close closes the etcd session
+func (e *etcdClient) Close(ctx context.Context) {
+ close(e.stopStatusChecker)
+ sessionErr := e.waitForInitialSession(ctx)
+ if e.controllers != nil {
+ e.controllers.RemoveAll()
+ }
+ e.RLock()
+ defer e.RUnlock()
+ // Only close e.lockSession if the initial session was successful
+ if sessionErr == nil {
+ if err := e.lockSession.Close(); err != nil {
+ e.getLogger().WithError(err).Warning("Failed to revoke lock session while closing etcd client")
+ }
+ }
+ if e.client != nil {
+ if err := e.client.Close(); err != nil {
+ e.getLogger().WithError(err).Warning("Failed to close etcd client")
+ }
+ }
+
+ if e.leaseManager != nil {
+ // Wait until all child goroutines spawned by the lease manager have terminated.
+ e.leaseManager.Wait()
+ }
+}
+
+// GetCapabilities returns the capabilities of the backend
+func (e *etcdClient) GetCapabilities() Capabilities {
+ return Capabilities(CapabilityCreateIfExists)
+}
+
+// Encode encodes a binary slice into a character set that the backend supports
+func (e *etcdClient) Encode(in []byte) (out string) {
+ defer func() { Trace("Encode", nil, logrus.Fields{"in": in, "out": out}) }()
+ return string(in)
+}
+
+// Decode decodes a key previously encoded back into the original binary slice
+func (e *etcdClient) Decode(in string) (out []byte, err error) {
+ defer func() { Trace("Decode", err, logrus.Fields{"in": in, "out": out}) }()
+ return []byte(in), nil
+}
+
+// ListAndWatch implements the BackendOperations.ListAndWatch using etcd
+func (e *etcdClient) ListAndWatch(ctx context.Context, name, prefix string, chanSize int) *Watcher {
+ w := newWatcher(name, prefix, chanSize)
+
+ e.getLogger().WithField(fieldWatcher, w).Debug("Starting watcher...")
+
+ go e.Watch(ctx, w)
+
+ return w
+}
+
+// RegisterLeaseExpiredObserver registers a function which is executed when
+// the lease associated with a key having the given prefix is detected as expired.
+// If the function is nil, the previous observer (if any) is unregistered.
+func (e *etcdClient) RegisterLeaseExpiredObserver(prefix string, fn func(key string)) {
+ if fn == nil {
+ e.leaseExpiredObservers.Delete(prefix)
+ } else {
+ e.leaseExpiredObservers.Store(prefix, fn)
+ }
+}
+
+func (e *etcdClient) expiredLeaseObserver(key string) {
+ e.leaseExpiredObservers.Range(func(prefix, fn any) bool {
+ if strings.HasPrefix(key, prefix.(string)) {
+ fn.(func(string))(key)
+ }
+ return true
+ })
+}
+
+// UserEnforcePresence creates a user in etcd if not already present, and grants the specified roles.
+func (e *etcdClient) UserEnforcePresence(ctx context.Context, name string, roles []string) error {
+ scopedLog := e.getLogger().WithField(FieldUser, name)
+
+ scopedLog.Debug("Creating user")
+ _, err := e.client.Auth.UserAddWithOptions(ctx, name, "", &client.UserAddOptions{NoPassword: true})
+ if err != nil {
+ if errors.Is(err, v3rpcErrors.ErrUserAlreadyExist) {
+ scopedLog.Debug("User already exists")
+ } else {
+ return err
+ }
+ }
+
+ for _, role := range roles {
+ scopedLog.WithField(FieldRole, role).Debug("Granting role to user")
+
+ _, err := e.client.Auth.UserGrantRole(ctx, name, role)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// UserEnforcePresence deletes a user from etcd, if present.
+func (e *etcdClient) UserEnforceAbsence(ctx context.Context, name string) error {
+ scopedLog := e.getLogger().WithField(FieldUser, name)
+
+ scopedLog.Debug("Deleting user")
+ _, err := e.client.Auth.UserDelete(ctx, name)
+ if err != nil {
+ if errors.Is(err, v3rpcErrors.ErrUserNotFound) {
+ scopedLog.Debug("User not found")
+ } else {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// SplitK8sServiceURL returns the service name and namespace for the given address.
+// If the given address is not parseable or it is not in the format
+// '<protocol>://<service-name>.<namespace>[.optional]', returns an error.
+func SplitK8sServiceURL(address string) (string, string, error) {
+ u, err := url.Parse(address)
+ if err != nil {
+ return "", "", err
+ }
+ // typical service name "cilium-etcd-client.kube-system.svc"
+ names := strings.Split(u.Hostname(), ".")
+ if len(names) >= 2 {
+ return names[0], names[1], nil
+ }
+ return "", "",
+ fmt.Errorf("invalid service name. expecting .[optional], got: %s", address)
+}
+
+// IsEtcdOperator returns the service name if the configuration is setting up an
+// etcd-operator. If the configuration explicitly states it is configured
+// to connect to an etcd operator, e.g. with etcd.operator=true, the returned
+// service name is the first found within the configuration specified.
+func IsEtcdOperator(selectedBackend string, opts map[string]string, k8sNamespace string) (string, bool) {
+ if selectedBackend != EtcdBackendName {
+ return "", false
+ }
+
+ isEtcdOperator := strings.ToLower(opts[isEtcdOperatorOption]) == "true"
+
+ fqdnIsEtcdOperator := func(address string) bool {
+ svcName, ns, err := SplitK8sServiceURL(address)
+ return err == nil &&
+ svcName == "cilium-etcd-client" &&
+ ns == k8sNamespace
+ }
+
+ fqdn := opts[EtcdAddrOption]
+ if len(fqdn) != 0 {
+ if fqdnIsEtcdOperator(fqdn) || isEtcdOperator {
+ return fqdn, true
+ }
+ return "", false
+ }
+
+ bm := newEtcdModule()
+ err := bm.setConfig(opts)
+ if err != nil {
+ return "", false
+ }
+ etcdConfig := bm.getConfig()[EtcdOptionConfig]
+ if len(etcdConfig) == 0 {
+ return "", false
+ }
+
+ cfg, err := newConfig(etcdConfig)
+ if err != nil {
+ log.WithError(err).Error("Unable to read etcd configuration.")
+ return "", false
+ }
+ for _, endpoint := range cfg.Endpoints {
+ if fqdnIsEtcdOperator(endpoint) || isEtcdOperator {
+ return endpoint, true
+ }
+ }
+
+ return "", false
+}
+
+// newConfig is a wrapper of clientyaml.NewConfig. Since etcd has deprecated
+// the `ca-file` field from yamlConfig in v3.4, the clientyaml.NewConfig won't
+// read that field from the etcd configuration file making Cilium fail to
+// connect to a TLS-enabled etcd server. Since we should have deprecated the
+// usage of this field a long time ago, in this galaxy, we will have this
+// wrapper function as a workaround which will still use the `ca-file` field to
+// avoid users breaking their connectivity to etcd when upgrading Cilium.
+// TODO remove this wrapper in cilium >= 1.8
+func newConfig(fpath string) (*client.Config, error) {
+ cfg, err := clientyaml.NewConfig(fpath)
+ if err != nil {
+ return nil, err
+ }
+ if cfg.TLS == nil || cfg.TLS.RootCAs != nil {
+ return cfg, nil
+ }
+
+ yc := &yamlConfig{}
+ b, err := os.ReadFile(fpath)
+ if err != nil {
+ return nil, err
+ }
+ err = yaml.Unmarshal(b, yc)
+ if err != nil {
+ return nil, err
+ }
+ if yc.InsecureTransport {
+ return cfg, nil
+ }
+
+ if yc.CAfile != "" {
+ cp, err := tlsutil.NewCertPool([]string{yc.CAfile})
+ if err != nil {
+ return nil, err
+ }
+ cfg.TLS.RootCAs = cp
+ }
+ return cfg, nil
+}
+
+// reload on-disk certificate and key when needed
+func getClientCertificateReloader(fpath string) (func(*tls.CertificateRequestInfo) (*tls.Certificate, error), error) {
+ yc := &yamlKeyPairConfig{}
+ b, err := os.ReadFile(fpath)
+ if err != nil {
+ return nil, err
+ }
+ err = yaml.Unmarshal(b, yc)
+ if err != nil {
+ return nil, err
+ }
+ if yc.Certfile == "" || yc.Keyfile == "" {
+ return nil, nil
+ }
+ reloader := func(_ *tls.CertificateRequestInfo) (*tls.Certificate, error) {
+ cer, err := tls.LoadX509KeyPair(yc.Certfile, yc.Keyfile)
+ return &cer, err
+ }
+ return reloader, nil
+}
+
+// copy of relevant internal structure fields in go.etcd.io/etcd/clientv3/yaml
+// needed to implement certificates reload, not depending on the deprecated
+// newconfig/yamlConfig.
+type yamlKeyPairConfig struct {
+ Certfile string `json:"cert-file"`
+ Keyfile string `json:"key-file"`
+}
+
+// copy of the internal structure in github.com/etcd-io/etcd/clientv3/yaml so we
+// can still use the `ca-file` field for one more release.
+type yamlConfig struct {
+ client.Config
+
+ InsecureTransport bool `json:"insecure-transport"`
+ InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify"`
+ Certfile string `json:"cert-file"`
+ Keyfile string `json:"key-file"`
+ TrustedCAfile string `json:"trusted-ca-file"`
+
+ // CAfile is being deprecated. Use 'TrustedCAfile' instead.
+ // TODO: deprecate this in v4
+ CAfile string `json:"ca-file"`
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/etcd_lease.go b/vendor/github.com/cilium/cilium/pkg/kvstore/etcd_lease.go
new file mode 100644
index 000000000..74cd7aa0f
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/etcd_lease.go
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package kvstore
+
+import (
+ "context"
+ "errors"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/sirupsen/logrus"
+ v3rpcErrors "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+ client "go.etcd.io/etcd/client/v3"
+
+ "github.com/cilium/cilium/pkg/lock"
+ "github.com/cilium/cilium/pkg/spanstat"
+)
+
+// etcdLeaseClient represents the subset of the etcd client methods used to handle the leases lifecycle.
+type etcdLeaseClient interface {
+ Grant(ctx context.Context, ttl int64) (*client.LeaseGrantResponse, error)
+ KeepAlive(ctx context.Context, id client.LeaseID) (<-chan *client.LeaseKeepAliveResponse, error)
+ Ctx() context.Context
+}
+
+type leaseInfo struct {
+ count uint32
+ cancel context.CancelFunc
+}
+
+// etcdLeaseManager manages the acquisition of the leases, and keeps track of
+// which lease is attached to which etcd key.
+type etcdLeaseManager struct {
+ client etcdLeaseClient
+ log *logrus.Entry
+
+ ttl time.Duration
+ limit uint32
+ expired func(key string)
+
+ mu lock.RWMutex
+ leases map[client.LeaseID]*leaseInfo
+ keys map[string]client.LeaseID
+ current client.LeaseID
+
+ acquiring chan struct{}
+ wg sync.WaitGroup
+}
+
+// newEtcdLeaseManager builds and returns a new lease manager instance.
+func newEtcdLeaseManager(cl etcdLeaseClient, ttl time.Duration, limit uint32, expired func(key string), log *logrus.Entry) *etcdLeaseManager {
+ return &etcdLeaseManager{
+ client: cl,
+ log: log,
+
+ ttl: ttl,
+ limit: limit,
+ expired: expired,
+
+ current: client.NoLease,
+ leases: make(map[client.LeaseID]*leaseInfo),
+ keys: make(map[string]client.LeaseID),
+ }
+}
+
+// GetLeaseID returns a lease ID, and associates it to the given key. It leverages
+// one of the already acquired leases if they are not already attached to too many
+// keys, otherwise a new one is acquired.
+func (elm *etcdLeaseManager) GetLeaseID(ctx context.Context, key string) (client.LeaseID, error) {
+ elm.mu.Lock()
+
+ // This key is already attached to a lease, hence just return it.
+ if leaseID := elm.keys[key]; leaseID != client.NoLease {
+ elm.mu.Unlock()
+ return leaseID, nil
+ }
+
+ // Return the current lease if it has not been used more than limit times
+ if info := elm.leases[elm.current]; info != nil && info.count < elm.limit {
+ info.count++
+ elm.keys[key] = elm.current
+ elm.mu.Unlock()
+
+ return elm.current, nil
+ }
+
+ // Otherwise, loop through the other known leases to see if any has been released
+ for lease, info := range elm.leases {
+ if info.count < elm.limit {
+ elm.current = lease
+ info.count++
+ elm.keys[key] = elm.current
+ elm.mu.Unlock()
+
+ return elm.current, nil
+ }
+ }
+
+ // If none is found, we need to acquire a new lease. acquiring is a channel
+ // used to detect whether we are already in the process of acquiring a new
+ // lease, to prevent multiple acquisitions in parallel.
+ acquiring := elm.acquiring
+ if acquiring == nil {
+ elm.acquiring = make(chan struct{})
+ }
+
+ // Unlock, so that we don't block other parallel operations (e.g., releases)
+ // while acquiring a new lease, since it might be a slow operation.
+ elm.mu.Unlock()
+
+ // Someone else is already acquiring a new lease. Wait until
+ // it completes, and then retry again.
+ if acquiring != nil {
+ select {
+ case <-acquiring:
+ return elm.GetLeaseID(ctx, key)
+ case <-ctx.Done():
+ return client.NoLease, ctx.Err()
+ }
+ }
+
+ // Otherwise, we can proceed to acquire a new lease.
+ leaseID, cancel, err := elm.newLease(ctx)
+
+ elm.mu.Lock()
+
+ // Signal that the acquisition process has completed.
+ close(elm.acquiring)
+ elm.acquiring = nil
+
+ if err != nil {
+ elm.mu.Unlock()
+ return client.NoLease, err
+ }
+
+ elm.current = leaseID
+ elm.leases[leaseID] = &leaseInfo{cancel: cancel}
+ elm.mu.Unlock()
+
+ return elm.GetLeaseID(ctx, key)
+}
+
+// Release decrements the counter of the lease attached to the given key.
+func (elm *etcdLeaseManager) Release(key string) {
+ elm.mu.Lock()
+ defer elm.mu.Unlock()
+
+ elm.releaseUnlocked(key)
+}
+
+// ReleasePrefix decrements the counter of the leases attached to the keys
+// starting with the given prefix.
+func (elm *etcdLeaseManager) ReleasePrefix(prefix string) {
+ elm.mu.Lock()
+ defer elm.mu.Unlock()
+
+ for key, leaseID := range elm.keys {
+ if strings.HasPrefix(key, prefix) {
+ if info := elm.leases[leaseID]; info != nil && info.count > 0 {
+ info.count--
+ }
+ delete(elm.keys, key)
+ }
+ }
+}
+
+// KeyHasLease returns whether the given key is associated with the specified lease.
+func (elm *etcdLeaseManager) KeyHasLease(key string, leaseID client.LeaseID) bool {
+ elm.mu.RLock()
+ defer elm.mu.RUnlock()
+
+ return elm.keys[key] == leaseID
+}
+
+// CancelIfExpired verifies whether the error reports that the given lease has
+// expired, and in that case aborts the corresponding keepalive process.
+func (elm *etcdLeaseManager) CancelIfExpired(err error, leaseID client.LeaseID) {
+ if errors.Is(err, v3rpcErrors.ErrLeaseNotFound) {
+ elm.mu.Lock()
+ if info := elm.leases[leaseID]; info != nil {
+ info.cancel()
+ }
+ elm.mu.Unlock()
+ }
+}
+
+// TotalLeases returns the number of managed leases.
+func (elm *etcdLeaseManager) TotalLeases() uint32 {
+ elm.mu.RLock()
+ defer elm.mu.RUnlock()
+
+ return uint32(len(elm.leases))
+}
+
+// Wait waits until all child goroutines terminated.
+func (elm *etcdLeaseManager) Wait() {
+ elm.wg.Wait()
+}
+
+func (elm *etcdLeaseManager) newLease(ctx context.Context) (c client.LeaseID, cancelFn context.CancelFunc, err error) {
+ defer func(duration *spanstat.SpanStat) {
+ increaseMetric("lease", metricSet, "AcquireLease", duration.EndError(err).Total(), err)
+ }(spanstat.Start())
+ resp, err := elm.client.Grant(ctx, int64(elm.ttl.Seconds()))
+ if err != nil {
+ return client.NoLease, nil, err
+ }
+ leaseID := resp.ID
+
+ kctx, cancel := context.WithCancel(context.Background())
+ keepalive, err := elm.client.KeepAlive(kctx, leaseID)
+ if err != nil {
+ cancel()
+ return client.NoLease, nil, err
+ }
+
+ elm.wg.Add(1)
+ go elm.keepalive(kctx, leaseID, keepalive)
+
+ elm.log.WithFields(logrus.Fields{
+ "LeaseID": leaseID,
+ "TTL": elm.ttl,
+ }).Info("New lease successfully acquired")
+ return leaseID, cancel, nil
+}
+
+func (elm *etcdLeaseManager) keepalive(ctx context.Context, leaseID client.LeaseID,
+ keepalive <-chan *client.LeaseKeepAliveResponse) {
+ defer elm.wg.Done()
+
+ for range keepalive {
+ // Consume the keepalive messages until the channel is closed
+ }
+
+ select {
+ case <-elm.client.Ctx().Done():
+ // The context of the etcd client was closed
+ return
+ case <-ctx.Done():
+ default:
+ }
+
+ elm.log.WithField("LeaseID", leaseID).Warning("Lease expired")
+
+ elm.mu.Lock()
+ delete(elm.leases, leaseID)
+
+ var keys []string
+ for key, id := range elm.keys {
+ if id == leaseID {
+ keys = append(keys, key)
+ delete(elm.keys, key)
+ }
+ }
+ elm.mu.Unlock()
+
+ if elm.expired != nil {
+ for _, key := range keys {
+ elm.expired(key)
+ }
+ }
+}
+
+func (elm *etcdLeaseManager) releaseUnlocked(key string) {
+ leaseID := elm.keys[key]
+ if leaseID != client.NoLease {
+ if info := elm.leases[leaseID]; info != nil && info.count > 0 {
+ info.count--
+ }
+ delete(elm.keys, key)
+ }
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/events.go b/vendor/github.com/cilium/cilium/pkg/kvstore/events.go
new file mode 100644
index 000000000..3ac02575f
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/events.go
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+package kvstore
+
+import (
+ "context"
+ "sync"
+)
+
+// EventType defines the type of watch event that occurred
+type EventType int
+
+const (
+ // EventTypeCreate represents a newly created key
+ EventTypeCreate EventType = iota
+ // EventTypeModify represents a modified key
+ EventTypeModify
+ // EventTypeDelete represents a deleted key
+ EventTypeDelete
+ // EventTypeListDone signals that the initial list operation has completed
+ EventTypeListDone
+)
+
+// String() returns the human readable format of an event type
+func (t EventType) String() string {
+ switch t {
+ case EventTypeCreate:
+ return "create"
+ case EventTypeModify:
+ return "modify"
+ case EventTypeDelete:
+ return "delete"
+ case EventTypeListDone:
+ return "listDone"
+ default:
+ return "unknown"
+ }
+}
+
+// KeyValueEvent is a change event for a Key/Value pair
+type KeyValueEvent struct {
+ // Typ is the type of event { EventTypeCreate | EventTypeModify | EventTypeDelete | EventTypeListDone }
+ Typ EventType
+
+ // Key is the kvstore key that changed
+ Key string
+
+ // Value is the kvstore value associated with the key
+ Value []byte
+}
+
+// EventChan is a channel to receive events on
+type EventChan chan KeyValueEvent
+
+// stopChan is the channel used to indicate stopping of the watcher
+type stopChan chan struct{}
+
+// Watcher represents a KVstore watcher
+type Watcher struct {
+ // Events is the channel to which change notifications will be sent to
+ Events EventChan `json:"-"`
+
+ Name string `json:"name"`
+ Prefix string `json:"prefix"`
+ stopWatch stopChan
+
+ // stopOnce guarantees that Stop() is only called once
+ stopOnce sync.Once
+
+ // stopWait is the wait group to wait for watchers to exit gracefully
+ stopWait sync.WaitGroup
+}
+
+func newWatcher(name, prefix string, chanSize int) *Watcher {
+ w := &Watcher{
+ Name: name,
+ Prefix: prefix,
+ Events: make(EventChan, chanSize),
+ stopWatch: make(stopChan),
+ }
+
+ w.stopWait.Add(1)
+
+ return w
+}
+
+// String returns the name of the watcher
+func (w *Watcher) String() string {
+ return w.Name
+}
+
+// ListAndWatch creates a new watcher which will watch the specified prefix for
+// changes. Before doing this, it will list the current keys matching the
+// prefix and report them as new keys. Name can be set to anything and is used
+// for logging messages. The Events channel is created with the specified
+// size. Upon every change observed, a KeyValueEvent will be sent to the
+// Events channel
+//
+// Returns a watcher structure plus a channel that is closed when the initial
+// list operation has been completed
+func ListAndWatch(ctx context.Context, name, prefix string, chanSize int) *Watcher {
+ return Client().ListAndWatch(ctx, name, prefix, chanSize)
+}
+
+// Stop stops a watcher previously created and started with Watch()
+func (w *Watcher) Stop() {
+ w.stopOnce.Do(func() {
+ close(w.stopWatch)
+ log.WithField(fieldWatcher, w).Debug("Stopped watcher")
+ w.stopWait.Wait()
+ })
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/kvstore.go b/vendor/github.com/cilium/cilium/pkg/kvstore/kvstore.go
new file mode 100644
index 000000000..44d0c3c96
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/kvstore.go
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package kvstore
+
+import (
+ "strings"
+ "time"
+)
+
+// Value is an abstraction of the data stored in the kvstore as well as the
+// mod revision of that data.
+type Value struct {
+ Data []byte
+ ModRevision uint64
+ LeaseID int64
+ SessionID string
+}
+
+// KeyValuePairs is a map of key=value pairs
+type KeyValuePairs map[string]Value
+
+// Capabilities is a bitmask to indicate the capabilities of a backend
+type Capabilities uint32
+
+const (
+ // CapabilityCreateIfExists is true if CreateIfExists is functional
+ CapabilityCreateIfExists Capabilities = 1 << 0
+
+ // CapabilityDeleteOnZeroCount is true if DeleteOnZeroCount is functional
+ CapabilityDeleteOnZeroCount Capabilities = 1 << 1
+
+ // BaseKeyPrefix is the base prefix that should be used for all keys
+ BaseKeyPrefix = "cilium"
+
+ // InitLockPath is the path to the init lock to test quorum
+ InitLockPath = BaseKeyPrefix + "/.initlock"
+
+ // HeartbeatPath is the path to the key at which the operator updates
+ // the heartbeat
+ HeartbeatPath = BaseKeyPrefix + "/.heartbeat"
+
+ // HasClusterConfigPath is the path to the key used to convey that the cluster
+ // configuration will be eventually created, and remote cilium agents shall
+ // wait until it is present. If this key is not set, the cilium configuration
+ // might, or might not, be configured, but the agents will continue regardless,
+ // falling back to the backward compatible behavior. It must be set before that
+ // the agents have the possibility to connect to the kvstore (that is, when
+ // it is not yet exposed). The corresponding value is ignored.
+ HasClusterConfigPath = BaseKeyPrefix + "/.has-cluster-config"
+
+ // ClusterConfigPrefix is the kvstore prefix to cluster configuration
+ ClusterConfigPrefix = BaseKeyPrefix + "/cluster-config"
+
+ // SyncedPrefix is the kvstore prefix used to convey whether
+ // synchronization from an external source has completed for a given prefix
+ SyncedPrefix = BaseKeyPrefix + "/synced"
+
+ // HeartbeatWriteInterval is the interval in which the heartbeat key at
+ // HeartbeatPath is updated
+ HeartbeatWriteInterval = time.Minute
+)
+
+// StateToCachePrefix converts a kvstore prefix starting with "cilium/state"
+// (holding the cilium state) to the corresponding one holding cached information
+// from another kvstore (that is, "cilium/cache").
+func StateToCachePrefix(prefix string) string {
+ if strings.HasPrefix(prefix, "cilium/state") {
+ return strings.Replace(prefix, "cilium/state", "cilium/cache", 1)
+ }
+ return prefix
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/lock.go b/vendor/github.com/cilium/cilium/pkg/kvstore/lock.go
new file mode 100644
index 000000000..964b27d08
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/lock.go
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package kvstore
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/davecgh/go-spew/spew"
+ "github.com/google/uuid"
+ "github.com/sirupsen/logrus"
+
+ "github.com/cilium/cilium/pkg/debug"
+ "github.com/cilium/cilium/pkg/defaults"
+ "github.com/cilium/cilium/pkg/inctimer"
+ "github.com/cilium/cilium/pkg/lock"
+)
+
+var (
+ kvstoreLocks = pathLocks{lockPaths: map[string]lockOwner{}}
+
+ // staleLockTimeout is the timeout after which waiting for a believed
+ // other local lock user for the same key is given up on and etcd is
+ // asked directly. It is still highly unlikely that concurrent access
+ // occurs as only one consumer will manage to acquire the newly
+ // released lock. The only possibility of concurrent access is if a
+ // consumer is *still* holding the lock but this is highly unlikely
+ // given the duration of this timeout.
+ staleLockTimeout = defaults.KVStoreStaleLockTimeout
+)
+
+type KVLocker interface {
+ Unlock(ctx context.Context) error
+ // Comparator returns an object that should be used by the KVStore to make
+ // sure if the lock is still valid for its client or nil if no such
+ // verification exists.
+ Comparator() interface{}
+}
+
+// getLockPath returns the lock path representation of the given path.
+func getLockPath(path string) string {
+ return path + ".lock"
+}
+
+type lockOwner struct {
+ created time.Time
+ id uuid.UUID
+}
+
+type pathLocks struct {
+ mutex lock.RWMutex
+ lockPaths map[string]lockOwner
+}
+
+func init() {
+ debug.RegisterStatusObject("kvstore-locks", &kvstoreLocks)
+}
+
+// DebugStatus implements debug.StatusObject to provide debug status collection
+// ability
+func (pl *pathLocks) DebugStatus() string {
+ pl.mutex.RLock()
+ str := spew.Sdump(pl.lockPaths)
+ pl.mutex.RUnlock()
+ return str
+}
+
+func (pl *pathLocks) runGC() {
+ pl.mutex.Lock()
+ for path, owner := range pl.lockPaths {
+ if time.Since(owner.created) > staleLockTimeout {
+ log.WithField("path", path).Error("Forcefully unlocking local kvstore lock")
+ delete(pl.lockPaths, path)
+ }
+ }
+ pl.mutex.Unlock()
+}
+
+func (pl *pathLocks) lock(ctx context.Context, path string) (id uuid.UUID, err error) {
+ lockTimer, lockTimerDone := inctimer.New()
+ defer lockTimerDone()
+ for {
+ pl.mutex.Lock()
+ if _, ok := pl.lockPaths[path]; !ok {
+ id = uuid.New()
+ pl.lockPaths[path] = lockOwner{
+ created: time.Now(),
+ id: id,
+ }
+ pl.mutex.Unlock()
+ return
+ }
+ pl.mutex.Unlock()
+
+ select {
+ case <-lockTimer.After(time.Duration(10) * time.Millisecond):
+ case <-ctx.Done():
+ err = fmt.Errorf("lock was cancelled: %s", ctx.Err())
+ return
+ }
+ }
+}
+
+func (pl *pathLocks) unlock(path string, id uuid.UUID) {
+ pl.mutex.Lock()
+ if owner, ok := pl.lockPaths[path]; ok && owner.id == id {
+ delete(pl.lockPaths, path)
+ }
+ pl.mutex.Unlock()
+}
+
+// Lock is a lock return by LockPath
+type Lock struct {
+ path string
+ id uuid.UUID
+ kvLock KVLocker
+}
+
+// LockPath locks the specified path. The key for the lock is not the path
+// provided itself but the path with a suffix of ".lock" appended. The lock
+// returned also contains a path-specific local Mutex which will be held.
+//
+// It is required to call Unlock() on the returned Lock to unlock
+func LockPath(ctx context.Context, backend BackendOperations, path string) (l *Lock, err error) {
+ id, err := kvstoreLocks.lock(ctx, path)
+ if err != nil {
+ return nil, err
+ }
+
+ lock, err := backend.LockPath(ctx, path)
+ if err != nil {
+ kvstoreLocks.unlock(path, id)
+ Trace("Failed to lock", err, logrus.Fields{fieldKey: path})
+ err = fmt.Errorf("error while locking path %s: %s", path, err)
+ return nil, err
+ }
+
+ Trace("Successful lock", err, logrus.Fields{fieldKey: path})
+ return &Lock{kvLock: lock, path: path, id: id}, err
+}
+
+// RunLockGC inspects all local kvstore locks to determine whether they have
+// been held longer than the stale lock timeout, and if so, unlocks them
+// forcibly.
+func RunLockGC() {
+ kvstoreLocks.runGC()
+}
+
+// Unlock unlocks a lock
+func (l *Lock) Unlock(ctx context.Context) error {
+ if l == nil {
+ return nil
+ }
+
+ // Unlock kvstore mutex first
+ err := l.kvLock.Unlock(ctx)
+ if err != nil {
+ log.WithError(err).WithField("path", l.path).Error("Unable to unlock kvstore lock")
+ }
+
+ // unlock local lock even if kvstore cannot be unlocked
+ kvstoreLocks.unlock(l.path, l.id)
+ Trace("Unlocked", nil, logrus.Fields{fieldKey: l.path})
+
+ return err
+}
+
+func (l *Lock) Comparator() interface{} {
+ return l.kvLock.Comparator()
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/logfields.go b/vendor/github.com/cilium/cilium/pkg/kvstore/logfields.go
new file mode 100644
index 000000000..73227f6b2
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/logfields.go
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package kvstore
+
+import (
+ "github.com/cilium/cilium/pkg/logging"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+)
+
+var log = logging.DefaultLogger.WithField(logfields.LogSubsys, "kvstore")
+
+const (
+ // fieldKVStoreModule is the name of the kvstore backend (etcd or consul)
+ fieldKVStoreModule = "module"
+
+ // name of watcher
+ fieldWatcher = "watcher"
+
+ // key revision
+ fieldRev = "revision"
+
+ // fieldSession refers to a connection/session with the kvstore
+ fieldSession = "session"
+
+ // fieldPrefix is the prefix of the key used in the operation
+ fieldPrefix = "prefix"
+
+ // fieldKey is the prefix of the key used in the operation
+ fieldKey = "key"
+
+ // fieldValue is the prefix of the key used in the operation
+ fieldValue = "value"
+
+ // fieldCondition is the condition that requires to be met
+ fieldCondition = "condition"
+
+ // fieldNumEntries is the number of entries in the result
+ fieldNumEntries = "numEntries"
+
+ // fieldRemainingEntries is the number of entries still to be retrieved
+ fieldRemainingEntries = "remainingEntries"
+
+ // fieldAttachLease is true if the key must be attached to a lease
+ fieldAttachLease = "attachLease"
+
+ // fieldEtcdEndpoint is the etcd endpoint we talk to
+ fieldEtcdEndpoint = "etcdEndpoint"
+
+ // FieldUser identifies a user in the kvstore
+ FieldUser = logfields.User
+
+ // FieldRole identifies a role in the kvstore
+ FieldRole = "role"
+)
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/metrics.go b/vendor/github.com/cilium/cilium/pkg/kvstore/metrics.go
new file mode 100644
index 000000000..e57cbe9f8
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/metrics.go
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package kvstore
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/cilium/cilium/pkg/metrics"
+)
+
+const (
+ metricDelete = "delete"
+ metricRead = "read"
+ metricSet = "set"
+)
+
+func GetScopeFromKey(key string) string {
+ s := strings.SplitN(key, "/", 5)
+ if len(s) < 4 {
+ if len(key) >= 12 {
+ return key[:12]
+ }
+ return key
+ }
+ return fmt.Sprintf("%s/%s", s[2], s[3])
+}
+
+func increaseMetric(key, kind, action string, duration time.Duration, err error) {
+ if !metrics.KVStoreOperationsDuration.IsEnabled() {
+ return
+ }
+ namespace := GetScopeFromKey(key)
+ outcome := metrics.Error2Outcome(err)
+ metrics.KVStoreOperationsDuration.
+ WithLabelValues(namespace, kind, action, outcome).Observe(duration.Seconds())
+}
+
+func trackEventQueued(key string, typ EventType, duration time.Duration) {
+ if !metrics.KVStoreEventsQueueDuration.IsEnabled() {
+ return
+ }
+ metrics.KVStoreEventsQueueDuration.WithLabelValues(GetScopeFromKey(key), typ.String()).Observe(duration.Seconds())
+}
+
+func recordQuorumError(err string) {
+ if !metrics.KVStoreQuorumErrors.IsEnabled() {
+ return
+ }
+ metrics.KVStoreQuorumErrors.WithLabelValues(err).Inc()
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/store/cell.go b/vendor/github.com/cilium/cilium/pkg/kvstore/store/cell.go
new file mode 100644
index 000000000..238c641d4
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/store/cell.go
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package store
+
+import (
+ "github.com/cilium/cilium/pkg/hive/cell"
+)
+
+var Cell = cell.Module(
+ "kvstore-utils",
+ "Provides factory for kvstore related synchronizers",
+
+ cell.Provide(NewFactory),
+
+ cell.Metric(MetricsProvider),
+)
+
+type Factory interface {
+ NewSyncStore(clusterName string, backend SyncStoreBackend, prefix string, opts ...WSSOpt) SyncStore
+ NewWatchStore(clusterName string, keyCreator KeyCreator, observer Observer, opts ...RWSOpt) WatchStore
+ NewWatchStoreManager(backend WatchStoreBackend, clusterName string) WatchStoreManager
+}
+
+type factoryImpl struct {
+ metrics *Metrics
+}
+
+func (w *factoryImpl) NewSyncStore(clusterName string, backend SyncStoreBackend, prefix string, opts ...WSSOpt) SyncStore {
+ return newWorkqueueSyncStore(clusterName, backend, prefix, w.metrics, opts...)
+}
+
+func (w *factoryImpl) NewWatchStore(clusterName string, keyCreator KeyCreator, observer Observer, opts ...RWSOpt) WatchStore {
+ return newRestartableWatchStore(clusterName, keyCreator, observer, w.metrics, opts...)
+}
+
+func (w *factoryImpl) NewWatchStoreManager(backend WatchStoreBackend, clusterName string) WatchStoreManager {
+ return newWatchStoreManagerSync(backend, clusterName, w)
+}
+
+func NewFactory(storeMetrics *Metrics) Factory {
+ return &factoryImpl{
+ metrics: storeMetrics,
+ }
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/store/doc.go b/vendor/github.com/cilium/cilium/pkg/kvstore/store/doc.go
new file mode 100644
index 000000000..780065293
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/store/doc.go
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Package store implements a shared store backed by a kvstore or similar with
+// the following properties:
+//
+// - A single type is used to represent all keys
+// - Any number of collaborators can join the store. Typically a collaborator
+// is an individual Cilium agent running on each node.
+// - All collaborators can own and contribute keys to the store. Each key is
+// owned by exactly one collaborator. It is the responsibility of each
+// collaborator to pick a key name which is guaranteed to be unique.
+ // - All collaborators desire to see all keys within the scope of a store. The
+// scope of the store is defined by a common key prefix. For this purpose,
+// each collaborator maintains a local cache of all keys in the store by
+// subscribing to change events.
+package store
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/store/metrics.go b/vendor/github.com/cilium/cilium/pkg/kvstore/store/metrics.go
new file mode 100644
index 000000000..eb6508977
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/store/metrics.go
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package store
+
+import (
+ "github.com/cilium/cilium/pkg/metrics"
+ "github.com/cilium/cilium/pkg/metrics/metric"
+)
+
+type Metrics struct {
+ KVStoreSyncQueueSize metric.Vec[metric.Gauge]
+ KVStoreInitialSyncCompleted metric.Vec[metric.Gauge]
+}
+
+func MetricsProvider() *Metrics {
+ return &Metrics{
+ KVStoreSyncQueueSize: metric.NewGaugeVec(metric.GaugeOpts{
+ ConfigName: metrics.Namespace + "_" + metrics.SubsystemKVStore + "_sync_queue_size",
+ Namespace: metrics.Namespace,
+ Subsystem: metrics.SubsystemKVStore,
+ Name: "sync_queue_size",
+ Help: "Number of elements queued for synchronization in the kvstore",
+ }, []string{metrics.LabelScope, metrics.LabelSourceCluster}),
+
+ KVStoreInitialSyncCompleted: metric.NewGaugeVec(metric.GaugeOpts{
+ ConfigName: metrics.Namespace + "_" + metrics.SubsystemKVStore + "_initial_sync_completed",
+ Namespace: metrics.Namespace,
+ Subsystem: metrics.SubsystemKVStore,
+ Name: "initial_sync_completed",
+ Help: "Whether the initial synchronization from/to the kvstore has completed",
+ }, []string{metrics.LabelScope, metrics.LabelSourceCluster, metrics.LabelAction}),
+ }
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/store/store.go b/vendor/github.com/cilium/cilium/pkg/kvstore/store/store.go
new file mode 100644
index 000000000..31702998a
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/store/store.go
@@ -0,0 +1,506 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package store
+
+import (
+ "context"
+ "fmt"
+ "path"
+ "strings"
+ "time"
+
+ "github.com/sirupsen/logrus"
+
+ "github.com/cilium/cilium/pkg/controller"
+ "github.com/cilium/cilium/pkg/kvstore"
+ "github.com/cilium/cilium/pkg/lock"
+ "github.com/cilium/cilium/pkg/logging"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+ "github.com/cilium/cilium/pkg/option"
+)
+
+const (
+ // listTimeoutDefault is the default timeout to wait while performing
+ // the initial list operation of objects from the kvstore
+ listTimeoutDefault = 3 * time.Minute
+
+ // watcherChanSize is the size of the channel to buffer kvstore events
+ watcherChanSize = 100
+)
+
+var (
+ controllers controller.Manager
+
+ log = logging.DefaultLogger.WithField(logfields.LogSubsys, "shared-store")
+
+ kvstoreSyncControllerGroup = controller.NewGroup("kvstore-sync")
+)
+
+// KeyCreator is the function to create a new empty Key instances. Store
+// collaborators must implement this interface and provide the implementation
+// in the Configuration structure.
+type KeyCreator func() Key
+
+// Configuration is the set of configuration parameters of a shared store.
+type Configuration struct {
+ // Prefix is the key prefix of the store shared by all keys. The prefix
+ // is the unique identification of the store. Multiple collaborators
+ // connected to the same kvstore cluster configuring stores with
+ // matching prefixes will automatically form a shared store. This
+ // parameter is required.
+ Prefix string
+
+ // SynchronizationInterval is the interval in which locally owned keys
+ // are synchronized with the kvstore. This parameter is optional.
+ SynchronizationInterval time.Duration
+
+ // SharedKeyDeleteDelay is the delay before a shared key delete is
+ // handled. This parameter is optional, and defaults to 0 if unset.
+ SharedKeyDeleteDelay time.Duration
+
+ // KeyCreator is called to allocate a Key instance when a new shared
+ // key is discovered. This parameter is required.
+ KeyCreator KeyCreator
+
+ // Backend is the kvstore to use as a backend. If no backend is
+ // specified, kvstore.Client() is being used.
+ Backend kvstore.BackendOperations
+
+ // Observer is the observer that will receive events on key mutations
+ Observer Observer
+
+ Context context.Context
+}
+
+// validate is invoked by JoinSharedStore to validate and complete the
+// configuration. It returns nil when the configuration is valid.
+func (c *Configuration) validate() error {
+ if c.Prefix == "" {
+ return fmt.Errorf("prefix must be specified")
+ }
+
+ if c.KeyCreator == nil {
+ return fmt.Errorf("KeyCreator must be specified")
+ }
+
+ if c.SynchronizationInterval == 0 {
+ c.SynchronizationInterval = option.Config.KVstorePeriodicSync
+ }
+
+ if c.Backend == nil {
+ c.Backend = kvstore.Client()
+ }
+
+ if c.Context == nil {
+ c.Context = context.Background()
+ }
+
+ return nil
+}
+
+// SharedStore is an instance of a shared store. It is created with
+// JoinSharedStore() and released with the SharedStore.Close() function.
+type SharedStore struct {
+ // conf is a copy of the store configuration. This field is never
+ // mutated after JoinSharedStore() so it is safe to access this without
+ // a lock.
+ conf Configuration
+
+ // name is the name of the shared store. It is derived from the kvstore
+ // prefix.
+ name string
+
+ // controllerName is the name of the controller used to synchronize
+ // with the kvstore. It is derived from the name.
+ controllerName string
+
+ // backend is the backend as configured via Configuration
+ backend kvstore.BackendOperations
+
+ // mutex protects mutations to localKeys and sharedKeys
+ mutex lock.RWMutex
+
+ // localKeys is a map of keys that are owned by the local instance. All
+ // local keys are synchronized with the kvstore. This map can be
+ // modified with UpdateLocalKey() and DeleteLocalKey().
+ localKeys map[string]LocalKey
+
+ // sharedKeys is a map of all keys that either have been discovered
+ // from remote collaborators or successfully shared local keys. This
+ // map represents the state in the kvstore and is updated based on
+ // kvstore events.
+ sharedKeys map[string]Key
+
+ kvstoreWatcher *kvstore.Watcher
+}
+
+// Observer receives events when objects in the store mutate
+type Observer interface {
+ // OnDelete is called when the key has been deleted from the shared store
+ OnDelete(k NamedKey)
+
+ // OnUpdate is called whenever a change has occurred in the data
+ // structure represented by the key
+ OnUpdate(k Key)
+}
+
+// NamedKey is an interface that a data structure must implement in order to
+// be deleted from a SharedStore.
+type NamedKey interface {
+ // GetKeyName must return the name of the key. The name of the key must
+ // be unique within the store and stable for a particular key. The name
+ // of the key must be identical across agent restarts as the keys
+ // remain in the kvstore.
+ GetKeyName() string
+}
+
+// Key is the interface that a data structure must implement in order to be
+// stored and shared as a key in a SharedStore.
+type Key interface {
+ NamedKey
+
+ // Marshal is called to retrieve the byte slice representation of the
+ // data represented by the key to store it in the kvstore. The function
+ // must ensure that the underlying datatype is properly locked. It is
+ // typically a good idea to use json.Marshal to implement this
+ // function.
+ Marshal() ([]byte, error)
+
+ // Unmarshal is called when an update from the kvstore is received. The
+ // prefix configured for the store is removed from the key, and the
+ // byte slice passed to the function is coming from the Marshal
+ // function from another collaborator. The function must unmarshal and
+ // update the underlying data type. It is typically a good idea to use
+ // json.Unmarshal to implement this function.
+ Unmarshal(key string, data []byte) error
+}
+
+// LocalKey is a Key owned by the local store instance
+type LocalKey interface {
+ Key
+
+ // DeepKeyCopy must return a deep copy of the key
+ DeepKeyCopy() LocalKey
+}
+
+// KVPair represents a basic implementation of the LocalKey interface
+type KVPair struct{ Key, Value string }
+
+func NewKVPair(key, value string) *KVPair { return &KVPair{Key: key, Value: value} }
+func KVPairCreator() Key { return &KVPair{} }
+
+func (kv *KVPair) GetKeyName() string { return kv.Key }
+func (kv *KVPair) Marshal() ([]byte, error) { return []byte(kv.Value), nil }
+
+func (kv *KVPair) Unmarshal(key string, data []byte) error {
+ kv.Key, kv.Value = key, string(data)
+ return nil
+}
+
+func (kv *KVPair) DeepKeyCopy() LocalKey {
+ return NewKVPair(kv.Key, kv.Value)
+}
+
+// JoinSharedStore creates a new shared store based on the provided
+// configuration. An error is returned if the configuration is invalid. The
+// store is initialized with the contents of the kvstore. An error is returned
+// if the contents cannot be retrieved synchronously from the kvstore. Starts a
+// controller to continuously synchronize the store with the kvstore.
+func JoinSharedStore(c Configuration) (*SharedStore, error) {
+ if err := c.validate(); err != nil {
+ return nil, err
+ }
+
+ s := &SharedStore{
+ conf: c,
+ localKeys: map[string]LocalKey{},
+ sharedKeys: map[string]Key{},
+ backend: c.Backend,
+ }
+
+ s.name = "store-" + s.conf.Prefix
+ s.controllerName = "kvstore-sync-" + s.name
+
+ if err := s.listAndStartWatcher(); err != nil {
+ return nil, err
+ }
+
+ controllers.UpdateController(s.controllerName,
+ controller.ControllerParams{
+ Group: kvstoreSyncControllerGroup,
+ DoFunc: func(ctx context.Context) error {
+ return s.syncLocalKeys(ctx, true)
+ },
+ RunInterval: s.conf.SynchronizationInterval,
+ },
+ )
+
+ return s, nil
+}
+
+func (s *SharedStore) onDelete(k NamedKey) {
+ if s.conf.Observer != nil {
+ s.conf.Observer.OnDelete(k)
+ }
+}
+
+func (s *SharedStore) onUpdate(k Key) {
+ if s.conf.Observer != nil {
+ s.conf.Observer.OnUpdate(k)
+ }
+}
+
+ // Release frees all resources owned by the store but leaves all keys in the
+// kvstore intact
+func (s *SharedStore) Release() {
+ // Wait for all write operations to complete and then block all further
+ // operations
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+
+ if s.kvstoreWatcher != nil {
+ s.kvstoreWatcher.Stop()
+ }
+
+ controllers.RemoveController(s.controllerName)
+}
+
+// Close stops participation with a shared store and removes all keys owned by
+// this node in the kvstore. This stops the controller started by
+// JoinSharedStore().
+func (s *SharedStore) Close(ctx context.Context) {
+ s.Release()
+
+ for name, key := range s.localKeys {
+ if err := s.backend.Delete(ctx, s.keyPath(key)); err != nil {
+ s.getLogger().WithError(err).Warning("Unable to delete key in kvstore")
+ }
+
+ delete(s.localKeys, name)
+ // Since we have received our own notification we also need to remove
+ // it from the shared keys.
+ delete(s.sharedKeys, name)
+
+ s.onDelete(key)
+ }
+}
+
+// keyPath returns the absolute kvstore path of a key
+func (s *SharedStore) keyPath(key NamedKey) string {
+ // WARNING - STABLE API: The composition of the absolute key path
+ // cannot be changed without breaking upgrades and downgrades.
+ return path.Join(s.conf.Prefix, key.GetKeyName())
+}
+
+// syncLocalKey synchronizes a key to the kvstore
+func (s *SharedStore) syncLocalKey(ctx context.Context, key LocalKey, lease bool) error {
+ jsonValue, err := key.Marshal()
+ if err != nil {
+ return err
+ }
+
+ // Update key in kvstore, overwrite an eventual existing key. If requested, attach
+ // lease to expire entry when agent dies and never comes back up.
+ if _, err := s.backend.UpdateIfDifferent(ctx, s.keyPath(key), jsonValue, lease); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// syncLocalKeys synchronizes all local keys with the kvstore
+func (s *SharedStore) syncLocalKeys(ctx context.Context, lease bool) error {
+ // Create a copy of all local keys so we can unlock and sync to kvstore
+ // without holding the lock
+ s.mutex.RLock()
+ keys := make([]LocalKey, 0, len(s.localKeys))
+ for _, key := range s.localKeys {
+ keys = append(keys, key)
+ }
+ s.mutex.RUnlock()
+
+ for _, key := range keys {
+ if err := s.syncLocalKey(ctx, key, lease); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (s *SharedStore) lookupLocalKey(name string) LocalKey {
+ s.mutex.RLock()
+ defer s.mutex.RUnlock()
+
+ for _, key := range s.localKeys {
+ if key.GetKeyName() == name {
+ return key
+ }
+ }
+
+ return nil
+}
+
+// NumEntries returns the number of entries in the store
+func (s *SharedStore) NumEntries() int {
+ if s == nil {
+ return 0
+ }
+
+ s.mutex.RLock()
+ defer s.mutex.RUnlock()
+ return len(s.sharedKeys)
+}
+
+// SharedKeysMap returns a copy of the SharedKeysMap, the returned map can
+// be safely modified but the values of the map represent the actual data
+// stored in the internal SharedStore SharedKeys map.
+func (s *SharedStore) SharedKeysMap() map[string]Key {
+ s.mutex.RLock()
+ defer s.mutex.RUnlock()
+ sharedKeysCopy := make(map[string]Key, len(s.sharedKeys))
+
+ for k, v := range s.sharedKeys {
+ sharedKeysCopy[k] = v
+ }
+ return sharedKeysCopy
+}
+
+// UpdateLocalKeySync synchronously synchronizes a local key with the kvstore
+// and adds it to the list of local keys to be synchronized if the initial
+// synchronous synchronization was successful
+func (s *SharedStore) UpdateLocalKeySync(ctx context.Context, key LocalKey) error {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ err := s.syncLocalKey(ctx, key, true)
+ if err == nil {
+ s.localKeys[key.GetKeyName()] = key.DeepKeyCopy()
+ }
+ return err
+}
+
+// UpdateKeySync synchronously synchronizes a key with the kvstore.
+func (s *SharedStore) UpdateKeySync(ctx context.Context, key LocalKey, lease bool) error {
+ return s.syncLocalKey(ctx, key, lease)
+}
+
+// DeleteLocalKey removes a key from being synchronized with the kvstore
+func (s *SharedStore) DeleteLocalKey(ctx context.Context, key NamedKey) {
+ name := key.GetKeyName()
+
+ s.mutex.Lock()
+ _, ok := s.localKeys[name]
+ delete(s.localKeys, name)
+ s.mutex.Unlock()
+
+ err := s.backend.Delete(ctx, s.keyPath(key))
+
+ if ok {
+ if err != nil {
+ s.getLogger().WithError(err).Warning("Unable to delete key in kvstore")
+ }
+
+ s.onDelete(key)
+ }
+}
+
+func (s *SharedStore) getLogger() *logrus.Entry {
+ return log.WithFields(logrus.Fields{
+ "storeName": s.name,
+ })
+}
+
+func (s *SharedStore) updateKey(name string, value []byte) error {
+ newKey := s.conf.KeyCreator()
+ if err := newKey.Unmarshal(name, value); err != nil {
+ return err
+ }
+
+ s.mutex.Lock()
+ s.sharedKeys[name] = newKey
+ s.mutex.Unlock()
+
+ s.onUpdate(newKey)
+ return nil
+}
+
+func (s *SharedStore) deleteSharedKey(name string) {
+ s.mutex.Lock()
+ existingKey, ok := s.sharedKeys[name]
+ delete(s.sharedKeys, name)
+ s.mutex.Unlock()
+
+ if ok {
+ go func() {
+ time.Sleep(s.conf.SharedKeyDeleteDelay)
+ s.mutex.RLock()
+ _, ok := s.sharedKeys[name]
+ s.mutex.RUnlock()
+ if ok {
+ s.getLogger().WithFields(logrus.Fields{"key": name, "timeWindow": s.conf.SharedKeyDeleteDelay}).
+ Warning("Received delete event for key which re-appeared within delay time window")
+ return
+ }
+
+ s.onDelete(existingKey)
+ }()
+ } else {
+ s.getLogger().WithField("key", name).
+ Warning("Unable to find deleted key in local state")
+ }
+}
+
+func (s *SharedStore) listAndStartWatcher() error {
+ listDone := make(chan struct{})
+
+ go s.watcher(listDone)
+
+ select {
+ case <-listDone:
+ case <-time.After(listTimeoutDefault):
+ return fmt.Errorf("timeout while retrieving initial list of objects from kvstore")
+ }
+
+ return nil
+}
+
+func (s *SharedStore) watcher(listDone chan struct{}) {
+ s.kvstoreWatcher = s.backend.ListAndWatch(s.conf.Context, s.name+"-watcher", s.conf.Prefix, watcherChanSize)
+
+ for event := range s.kvstoreWatcher.Events {
+ if event.Typ == kvstore.EventTypeListDone {
+ s.getLogger().Debug("Initial list of objects received from kvstore")
+ close(listDone)
+ continue
+ }
+
+ logger := s.getLogger().WithFields(logrus.Fields{
+ "key": event.Key,
+ "eventType": event.Typ,
+ })
+
+ logger.Debugf("Received key update via kvstore [value %s]", string(event.Value))
+
+ keyName := strings.TrimPrefix(event.Key, s.conf.Prefix)
+ if keyName[0] == '/' {
+ keyName = keyName[1:]
+ }
+
+ switch event.Typ {
+ case kvstore.EventTypeCreate, kvstore.EventTypeModify:
+ if err := s.updateKey(keyName, event.Value); err != nil {
+ logger.WithError(err).Warningf("Unable to unmarshal store value: %s", string(event.Value))
+ }
+
+ case kvstore.EventTypeDelete:
+ if localKey := s.lookupLocalKey(keyName); localKey != nil {
+ logger.Warning("Received delete event for local key. Re-creating the key in the kvstore")
+
+ s.syncLocalKey(s.conf.Context, localKey, true)
+ } else {
+ s.deleteSharedKey(keyName)
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/store/syncstore.go b/vendor/github.com/cilium/cilium/pkg/kvstore/store/syncstore.go
new file mode 100644
index 000000000..ddd322e3b
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/store/syncstore.go
@@ -0,0 +1,354 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package store
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "path"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/sirupsen/logrus"
+ "k8s.io/client-go/util/workqueue"
+
+ "github.com/cilium/cilium/pkg/kvstore"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+ "github.com/cilium/cilium/pkg/metrics"
+)
+
+// SyncStore abstracts the operations allowing to synchronize key/value pairs
+// into a kvstore.
+type SyncStore interface {
+ // Run starts the SyncStore logic, blocking until the context is closed.
+ Run(ctx context.Context)
+
+ // UpsertKey upserts a key/value pair into the kvstore.
+ UpsertKey(ctx context.Context, key Key) error
+
+ // DeleteKey removes a key from the kvstore.
+ DeleteKey(ctx context.Context, key NamedKey) error
+
+ // Synced triggers the insertion of the "synced" key associated with this
+ // store into the kvstore once all upsertions already issued have completed
+ // successfully, eventually executing all specified callbacks (if any).
+ // Only the first invocation takes effect.
+ Synced(ctx context.Context, callbacks ...func(ctx context.Context)) error
+}
+
+// SyncStoreBackend represents the subset kvstore.BackendOperations leveraged
+// by SyncStore implementations.
+type SyncStoreBackend interface {
+ // Update creates or updates a key.
+ Update(ctx context.Context, key string, value []byte, lease bool) error
+ // Delete deletes a key.
+ Delete(ctx context.Context, key string) error
+
+ // RegisterLeaseExpiredObserver registers a function which is executed when
+ // the lease associated with a key having the given prefix is detected as expired.
+ RegisterLeaseExpiredObserver(prefix string, fn func(key string))
+}
+
+// wqSyncStore implements the SyncStore interface leveraging a workqueue to
+ // coalesce update/delete requests and handle retries in case of errors.
+type wqSyncStore struct {
+ backend SyncStoreBackend
+ prefix string
+ source string
+
+ workers uint
+ withLease bool
+
+ limiter workqueue.RateLimiter
+ workqueue workqueue.RateLimitingInterface
+ state sync.Map /* map[string][]byte --- map[NamedKey.GetKeyName()]Key.Marshal() */
+
+ synced atomic.Bool // Synced() has been triggered
+ pendingSync sync.Map // map[string]struct{}: the set of keys still to sync
+ syncedKey string
+ syncedCallbacks []func(context.Context)
+
+ log *logrus.Entry
+ queuedMetric prometheus.Gauge
+ syncedMetric prometheus.Gauge
+}
+
+type syncCanary struct{ skipCallbacks bool }
+
+type WSSOpt func(*wqSyncStore)
+
+// WSSWithRateLimiter sets the rate limiting algorithm to be used when requeueing failed events.
+func WSSWithRateLimiter(limiter workqueue.RateLimiter) WSSOpt {
+ return func(wss *wqSyncStore) {
+ wss.limiter = limiter
+ }
+}
+
+// WSSWithWorkers configures the number of workers spawned by Run() to handle update/delete operations.
+func WSSWithWorkers(workers uint) WSSOpt {
+ return func(wss *wqSyncStore) {
+ wss.workers = workers
+ }
+}
+
+// WSSWithoutLease disables attaching the lease to upserted keys.
+func WSSWithoutLease() WSSOpt {
+ return func(wss *wqSyncStore) {
+ wss.withLease = false
+ }
+}
+
+// WSSWithSyncedKeyOverride overrides the "synced" key inserted into the kvstore
+// when initial synchronization completed (by default it corresponds to the prefix).
+func WSSWithSyncedKeyOverride(key string) WSSOpt {
+ return func(wss *wqSyncStore) {
+ wss.syncedKey = key
+ }
+}
+
+ // newWorkqueueSyncStore returns a SyncStore instance which leverages a workqueue
+ // to coalesce update/delete requests and handle retries in case of errors.
+func newWorkqueueSyncStore(clusterName string, backend SyncStoreBackend, prefix string, m *Metrics, opts ...WSSOpt) SyncStore {
+ wss := &wqSyncStore{
+ backend: backend,
+ prefix: prefix,
+ source: clusterName,
+
+ workers: 1,
+ withLease: true,
+ limiter: workqueue.DefaultControllerRateLimiter(),
+ syncedKey: prefix,
+
+ log: log.WithField(logfields.Prefix, prefix),
+ }
+
+ for _, opt := range opts {
+ opt(wss)
+ }
+
+ wss.log = wss.log.WithField(logfields.ClusterName, wss.source)
+ wss.workqueue = workqueue.NewRateLimitingQueue(wss.limiter)
+ wss.queuedMetric = m.KVStoreSyncQueueSize.WithLabelValues(kvstore.GetScopeFromKey(prefix), wss.source)
+ wss.syncedMetric = m.KVStoreInitialSyncCompleted.WithLabelValues(kvstore.GetScopeFromKey(prefix), wss.source, "write")
+ return wss
+}
+
+// Run starts the SyncStore logic, blocking until the context is closed.
+func (wss *wqSyncStore) Run(ctx context.Context) {
+ var wg sync.WaitGroup
+
+ wss.syncedMetric.Set(metrics.BoolToFloat64(false))
+ defer wss.syncedMetric.Set(metrics.BoolToFloat64(false))
+
+ wss.backend.RegisterLeaseExpiredObserver(wss.prefix, wss.handleExpiredLease)
+ wss.backend.RegisterLeaseExpiredObserver(wss.getSyncedKey(), wss.handleExpiredLease)
+
+ wss.log.WithField(logfields.Workers, wss.workers).Info("Starting workqueue-based sync store")
+ wg.Add(int(wss.workers))
+ for i := uint(0); i < wss.workers; i++ {
+ go func() {
+ defer wg.Done()
+ for wss.processNextItem(ctx) {
+ }
+ }()
+ }
+
+ <-ctx.Done()
+
+ wss.backend.RegisterLeaseExpiredObserver(wss.prefix, nil)
+ wss.backend.RegisterLeaseExpiredObserver(wss.getSyncedKey(), nil)
+
+ wss.log.Info("Shutting down workqueue-based sync store")
+ wss.workqueue.ShutDown()
+ wg.Wait()
+}
+
+// UpsertKey registers the key for asynchronous upsertion in the kvstore, if the
+// corresponding value has changed. It returns an error in case it is impossible
+// to marshal the value, while kvstore failures are automatically handled through
+// a retry mechanism.
+func (wss *wqSyncStore) UpsertKey(_ context.Context, k Key) error {
+ key := k.GetKeyName()
+ value, err := k.Marshal()
+ if err != nil {
+ return fmt.Errorf("failed marshaling key %q: %w", k, err)
+ }
+
+ prevValue, loaded := wss.state.Swap(key, value)
+ if loaded && bytes.Equal(prevValue.([]byte), value) {
+ wss.log.WithField(logfields.Key, k).Debug("ignoring upsert request for already up-to-date key")
+ } else {
+ if !wss.synced.Load() {
+ wss.pendingSync.Store(key, struct{}{})
+ }
+
+ wss.workqueue.Add(key)
+ wss.queuedMetric.Set(float64(wss.workqueue.Len()))
+ }
+
+ return nil
+}
+
+// DeleteKey registers the key for asynchronous deletion from the kvstore, if it
+// was known to be present. It never returns an error, because kvstore failures
+// are automatically handled through a retry mechanism.
+func (wss *wqSyncStore) DeleteKey(_ context.Context, k NamedKey) error {
+ key := k.GetKeyName()
+ if _, loaded := wss.state.LoadAndDelete(key); loaded {
+ wss.workqueue.Add(key)
+ wss.queuedMetric.Set(float64(wss.workqueue.Len()))
+ } else {
+ wss.log.WithField(logfields.Key, key).Debug("ignoring delete request for non-existing key")
+ }
+
+ return nil
+}
+
+func (wss *wqSyncStore) Synced(_ context.Context, callbacks ...func(ctx context.Context)) error {
+ if synced := wss.synced.Swap(true); !synced {
+ wss.syncedCallbacks = callbacks
+ wss.workqueue.Add(syncCanary{})
+ }
+ return nil
+}
+
+func (wss *wqSyncStore) processNextItem(ctx context.Context) bool {
+ // Retrieve the next key to process from the workqueue.
+ key, shutdown := wss.workqueue.Get()
+ wss.queuedMetric.Set(float64(wss.workqueue.Len()))
+ if shutdown {
+ return false
+ }
+
+ // We call Done here so the workqueue knows we have finished
+ // processing this item.
+ defer func() {
+ wss.workqueue.Done(key)
+ // This ensures that the metric is correctly updated in case of requeues.
+ wss.queuedMetric.Set(float64(wss.workqueue.Len()))
+ }()
+
+ // Run the handler, passing it the key to be processed as parameter.
+ if err := wss.handle(ctx, key); err != nil {
+ // Put the item back on the workqueue to handle any transient errors.
+ wss.workqueue.AddRateLimited(key)
+ return true
+ }
+
+ // Since no error occurred, forget this item so it does not get queued again
+ // until another change happens.
+ wss.workqueue.Forget(key)
+ wss.pendingSync.Delete(key)
+ return true
+}
+
+func (wss *wqSyncStore) handle(ctx context.Context, key interface{}) error {
+ if value, ok := key.(syncCanary); ok {
+ return wss.handleSync(ctx, value.skipCallbacks)
+ }
+
+ if value, ok := wss.state.Load(key); ok {
+ return wss.handleUpsert(ctx, key.(string), value.([]byte))
+ }
+
+ return wss.handleDelete(ctx, key.(string))
+}
+
+func (wss *wqSyncStore) handleUpsert(ctx context.Context, key string, value []byte) error {
+ scopedLog := wss.log.WithField(logfields.Key, key)
+
+ err := wss.backend.Update(ctx, wss.keyPath(key), value, wss.withLease)
+ if err != nil {
+ scopedLog.WithError(err).Warning("Failed upserting key in kvstore. Retrying...")
+ return err
+ }
+
+ scopedLog.Debug("Upserted key in kvstore")
+ return nil
+}
+
+func (wss *wqSyncStore) handleDelete(ctx context.Context, key string) error {
+ scopedLog := wss.log.WithField(logfields.Key, key)
+
+ if err := wss.backend.Delete(ctx, wss.keyPath(key)); err != nil {
+ scopedLog.WithError(err).Warning("Failed deleting key from kvstore. Retrying...")
+ return err
+ }
+
+ scopedLog.Debug("Deleted key from kvstore")
+ return nil
+}
+
+func (wss *wqSyncStore) handleSync(ctx context.Context, skipCallbacks bool) error {
+ // This could be replaced by wss.toSync.Len() == 0 if it only existed...
+ syncCompleted := true
+ wss.pendingSync.Range(func(any, any) bool {
+ syncCompleted = false
+ return false
+ })
+
+ if !syncCompleted {
+ return fmt.Errorf("there are still keys to be synchronized")
+ }
+
+ key := wss.getSyncedKey()
+ scopedLog := wss.log.WithField(logfields.Key, key)
+
+ err := wss.backend.Update(ctx, key, []byte(time.Now().Format(time.RFC3339)), wss.withLease)
+ if err != nil {
+ scopedLog.WithError(err).Warning("Failed upserting synced key in kvstore. Retrying...")
+ return err
+ }
+
+ wss.log.Info("Initial synchronization from the external source completed")
+ wss.syncedMetric.Set(metrics.BoolToFloat64(true))
+
+ // Execute any callback that might have been registered.
+ if !skipCallbacks {
+ for _, callback := range wss.syncedCallbacks {
+ callback(ctx)
+ }
+ }
+
+ return nil
+}
+
+// handleExpiredLease gets executed when the lease attached to a given key expired,
+// and is responsible for enqueuing the given key to recreate it.
+func (wss *wqSyncStore) handleExpiredLease(key string) {
+ defer wss.queuedMetric.Set(float64(wss.workqueue.Len()))
+
+ if key == wss.getSyncedKey() {
+ // Re-enqueue the creation of the sync canary, but make sure that
+ // the registered callbacks are not executed a second time.
+ wss.workqueue.Add(syncCanary{skipCallbacks: true})
+ return
+ }
+
+ key = strings.TrimPrefix(strings.TrimPrefix(key, wss.prefix), "/")
+ _, ok := wss.state.Load(key)
+ if ok {
+ wss.log.WithField(logfields.Key, key).Debug("enqueuing upsert request for key as the attached lease expired")
+ if !wss.synced.Load() {
+ wss.pendingSync.Store(key, struct{}{})
+ }
+
+ wss.workqueue.Add(key)
+ }
+}
+
+// keyPath returns the absolute kvstore path of a key
+func (wss *wqSyncStore) keyPath(key string) string {
+ // WARNING - STABLE API: The composition of the absolute key path
+ // cannot be changed without breaking upgrades and downgrades.
+ return path.Join(wss.prefix, key)
+}
+
+func (wss *wqSyncStore) getSyncedKey() string {
+ return path.Join(kvstore.SyncedPrefix, wss.source, wss.syncedKey)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/store/watchstore.go b/vendor/github.com/cilium/cilium/pkg/kvstore/store/watchstore.go
new file mode 100644
index 000000000..701f3ec00
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/store/watchstore.go
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package store
+
+import (
+ "context"
+ "strings"
+ "sync/atomic"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/sirupsen/logrus"
+
+ "github.com/cilium/cilium/pkg/kvstore"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+ "github.com/cilium/cilium/pkg/metrics"
+ "github.com/cilium/cilium/pkg/metrics/metric"
+)
+
+// WatchStore abstracts the operations allowing to synchronize key/value pairs
+// from a kvstore, emitting the corresponding events.
+type WatchStore interface {
+ // Watch starts watching the specified kvstore prefix, blocking until the context is closed.
+ // Depending on the implementation, it might be executed multiple times.
+ Watch(ctx context.Context, backend WatchStoreBackend, prefix string)
+
+ // NumEntries returns the number of entries synchronized from the store.
+ NumEntries() uint64
+
+ // Synced returns whether the initial list of entries has been retrieved from
+ // the kvstore, and new events are currently being watched.
+ Synced() bool
+
+ // Drain emits a deletion event for each known key. It shall be called only
+ // when no watch operation is in progress.
+ Drain()
+}
+
+// WatchStoreBackend represents the subset of kvstore.BackendOperations leveraged
+// by WatchStore implementations.
+type WatchStoreBackend interface {
+ // ListAndWatch creates a new watcher for the given prefix after listing the existing keys.
+ ListAndWatch(ctx context.Context, name, prefix string, chanSize int) *kvstore.Watcher
+}
+
+type RWSOpt func(*restartableWatchStore)
+
+// RWSWithOnSyncCallback registers a function to be executed after
+// listing all keys from the kvstore for the first time. Multiple
+// callback functions can be registered.
+func RWSWithOnSyncCallback(callback func(ctx context.Context)) RWSOpt {
+ return func(rws *restartableWatchStore) {
+ rws.onSyncCallbacks = append(rws.onSyncCallbacks, callback)
+ }
+}
+
+// RWSWithEntriesMetric registers a Prometheus gauge metric that is kept
+// in sync with the number of entries synchronized from the kvstore.
+func RWSWithEntriesMetric(gauge prometheus.Gauge) RWSOpt {
+ return func(rws *restartableWatchStore) {
+ rws.entriesMetric = gauge
+ }
+}
+
+type rwsEntry struct {
+ key Key
+ stale bool
+}
+
+// restartableWatchStore implements the WatchStore interface, supporting
+// multiple executions of the Watch() operation (granted that the previous one
+// already terminated). This allows to transparently handle the case in which
+// we had to create a new etcd connection (for instance following a failure)
+// which refers to the same remote cluster.
+type restartableWatchStore struct {
+ source string
+ keyCreator KeyCreator
+ observer Observer
+
+ watching atomic.Bool
+ synced atomic.Bool
+ onSyncCallbacks []func(ctx context.Context)
+
+ // Using a separate entries counter avoids the need for synchronizing the
+ // access to the state map, since the only concurrent reader is represented
+ // by the NumEntries() function.
+ state map[string]*rwsEntry
+ numEntries atomic.Uint64
+
+ log *logrus.Entry
+ entriesMetric prometheus.Gauge
+ syncMetric metric.Vec[metric.Gauge]
+}
+
+// newRestartableWatchStore returns a WatchStore instance which supports
+// restarting the watch operation multiple times, automatically handling
+// the emission of deletion events for all stale entries (if enabled). It
+// shall be restarted only once the previous Watch execution terminated.
+func newRestartableWatchStore(clusterName string, keyCreator KeyCreator, observer Observer, m *Metrics, opts ...RWSOpt) WatchStore {
+ rws := &restartableWatchStore{
+ source: clusterName,
+ keyCreator: keyCreator,
+ observer: observer,
+
+ state: make(map[string]*rwsEntry),
+
+ log: log,
+ entriesMetric: metrics.NoOpGauge,
+ syncMetric: m.KVStoreInitialSyncCompleted,
+ }
+
+ for _, opt := range opts {
+ opt(rws)
+ }
+
+ rws.log = rws.log.WithField(logfields.ClusterName, rws.source)
+ return rws
+}
+
+// Watch starts watching the specified kvstore prefix, blocking until the context is closed.
+// It might be executed multiple times, granted that the previous execution already terminated.
+func (rws *restartableWatchStore) Watch(ctx context.Context, backend WatchStoreBackend, prefix string) {
+ // Append a trailing "/" to the prefix, to make sure that we watch only
+ // sub-elements belonging to that prefix, and not to sibling prefixes
+ // (for instance in case the last part of the prefix is the cluster name,
+ // and one is the substring of another).
+ if !strings.HasSuffix(prefix, "/") {
+ prefix = prefix + "/"
+ }
+
+ rws.log = rws.log.WithField(logfields.Prefix, prefix)
+ syncedMetric := rws.syncMetric.WithLabelValues(
+ kvstore.GetScopeFromKey(prefix), rws.source, "read")
+
+ rws.log.Info("Starting restartable watch store")
+ syncedMetric.Set(metrics.BoolToFloat64(false))
+
+ if rws.watching.Swap(true) {
+ rws.log.Panic("Cannot start the watch store while still running")
+ }
+
+ defer func() {
+ rws.log.Info("Stopped restartable watch store")
+ syncedMetric.Set(metrics.BoolToFloat64(false))
+ rws.watching.Store(false)
+ rws.synced.Store(false)
+ }()
+
+ // Mark all known keys as stale.
+ for _, entry := range rws.state {
+ entry.stale = true
+ }
+
+ // The events channel is closed when the context is closed.
+ watcher := backend.ListAndWatch(ctx, prefix, prefix, 0)
+ for event := range watcher.Events {
+ if event.Typ == kvstore.EventTypeListDone {
+ rws.log.Debug("Initial synchronization completed")
+ rws.drainKeys(true)
+ syncedMetric.Set(metrics.BoolToFloat64(true))
+ rws.synced.Store(true)
+
+ for _, callback := range rws.onSyncCallbacks {
+ callback(ctx)
+ }
+
+ // Clear the list of callbacks so that they don't get executed
+ // a second time in case of reconnections.
+ rws.onSyncCallbacks = nil
+
+ continue
+ }
+
+ key := strings.TrimPrefix(event.Key, prefix)
+ rws.log.WithFields(logrus.Fields{
+ logfields.Key: key,
+ logfields.Event: event.Typ,
+ }).Debug("Received event from kvstore")
+
+ switch event.Typ {
+ case kvstore.EventTypeCreate, kvstore.EventTypeModify:
+ rws.handleUpsert(key, event.Value)
+ case kvstore.EventTypeDelete:
+ rws.handleDelete(key)
+ }
+ }
+}
+
+// NumEntries returns the number of entries synchronized from the store.
+func (rws *restartableWatchStore) NumEntries() uint64 {
+ return rws.numEntries.Load()
+}
+
+// Synced returns whether the initial list of entries has been retrieved from
+// the kvstore, and new events are currently being watched.
+func (rws *restartableWatchStore) Synced() bool {
+ return rws.synced.Load()
+}
+
+// Drain emits a deletion event for each known key. It shall be called only
+// when no watch operation is in progress.
+func (rws *restartableWatchStore) Drain() {
+ if rws.watching.Swap(true) {
+ rws.log.Panic("Cannot drain the watch store while still running")
+ }
+ defer rws.watching.Store(false)
+
+ rws.log.Info("Draining restartable watch store")
+ rws.drainKeys(false)
+ rws.log.Info("Drained restartable watch store")
+}
+
+// drainKeys emits synthetic deletion events:
+// * staleOnly == true: for all keys marked as stale;
+// * staleOnly == false: for all known keys;
+func (rws *restartableWatchStore) drainKeys(staleOnly bool) {
+ for key, entry := range rws.state {
+ if !staleOnly || entry.stale {
+ rws.log.WithField(logfields.Key, key).Debug("Emitting deletion event for stale key")
+ rws.handleDelete(key)
+ }
+ }
+}
+
+func (rws *restartableWatchStore) handleUpsert(key string, value []byte) {
+ entry := &rwsEntry{key: rws.keyCreator()}
+ if err := entry.key.Unmarshal(key, value); err != nil {
+ rws.log.WithFields(logrus.Fields{
+ logfields.Key: key,
+ logfields.Value: string(value),
+ }).WithError(err).Warning("Unable to unmarshal value")
+ return
+ }
+
+ rws.state[key] = entry
+ rws.numEntries.Store(uint64(len(rws.state)))
+ rws.entriesMetric.Set(float64(len(rws.state)))
+ rws.observer.OnUpdate(entry.key)
+}
+
+func (rws *restartableWatchStore) handleDelete(key string) {
+ entry, ok := rws.state[key]
+ if !ok {
+ rws.log.WithField(logfields.Key, key).Warning("Received deletion event for unknown key")
+ return
+ }
+
+ delete(rws.state, key)
+ rws.numEntries.Store(uint64(len(rws.state)))
+ rws.entriesMetric.Set(float64(len(rws.state)))
+ rws.observer.OnDelete(entry.key)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/store/watchstoremgr.go b/vendor/github.com/cilium/cilium/pkg/kvstore/store/watchstoremgr.go
new file mode 100644
index 000000000..44f85114c
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/store/watchstoremgr.go
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package store
+
+import (
+ "context"
+ "path"
+ "sync"
+ "sync/atomic"
+
+ "github.com/sirupsen/logrus"
+
+ "github.com/cilium/cilium/pkg/kvstore"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+)
+
+// WSMFunc is a function which can be registered in the WatchStoreManager.
+type WSMFunc func(context.Context)
+
+// WatchStoreManager enables to register a set of functions to be asynchronously
+// executed when the corresponding kvstore prefixes are synchronized (based on
+// the implementation).
+type WatchStoreManager interface {
+ // Register registers a function associated with a given kvstore prefix.
+ // It cannot be called once Run() has started.
+ Register(prefix string, function WSMFunc)
+ // Run starts the manager, blocking until the context is closed and all
+ // started functions terminated.
+ Run(ctx context.Context)
+}
+
+// wsmCommon implements the common logic shared by WatchStoreManager implementations.
+type wsmCommon struct {
+ wg sync.WaitGroup
+ functions map[string]WSMFunc
+
+ running atomic.Bool
+ log *logrus.Entry
+}
+
+func newWSMCommon(clusterName string) wsmCommon {
+ return wsmCommon{
+ functions: make(map[string]WSMFunc),
+ log: log.WithField(logfields.ClusterName, clusterName),
+ }
+}
+
+// Register registers a function associated with a given kvstore prefix.
+// It cannot be called once Run() has started.
+func (mgr *wsmCommon) Register(prefix string, function WSMFunc) {
+ if mgr.running.Load() {
+ mgr.log.Panic("Cannot call Register while the watch store manager is running")
+ }
+
+ mgr.functions[prefix] = function
+}
+
+func (mgr *wsmCommon) ready(ctx context.Context, prefix string) {
+ if fn := mgr.functions[prefix]; fn != nil {
+ mgr.log.WithField(logfields.Prefix, prefix).Debug("Starting function for kvstore prefix")
+ delete(mgr.functions, prefix)
+
+ mgr.wg.Add(1)
+ go func() {
+ defer mgr.wg.Done()
+ fn(ctx)
+ mgr.log.WithField(logfields.Prefix, prefix).Debug("Function terminated for kvstore prefix")
+ }()
+ } else {
+ mgr.log.WithField(logfields.Prefix, prefix).Debug("Received sync event for unregistered prefix")
+ }
+}
+
+func (mgr *wsmCommon) run() {
+ mgr.log.Info("Starting watch store manager")
+ if mgr.running.Swap(true) {
+ mgr.log.Panic("Cannot start the watch store manager twice")
+ }
+}
+
+func (mgr *wsmCommon) wait() {
+ mgr.wg.Wait()
+ mgr.log.Info("Stopped watch store manager")
+}
+
+type wsmSync struct {
+ wsmCommon
+
+ clusterName string
+ backend WatchStoreBackend
+ store WatchStore
+ onUpdate func(prefix string)
+}
+
+// newWatchStoreManagerSync implements the WatchStoreManager interface, starting the
+// registered functions only once the corresponding prefix sync canary has been received.
+// This ensures that the synchronization of the keys hosted under the given prefix
+// has been successfully completed from the external source, even in case an
+// ephemeral kvstore is used.
+func newWatchStoreManagerSync(backend WatchStoreBackend, clusterName string, factory Factory) WatchStoreManager {
+ mgr := wsmSync{
+ wsmCommon: newWSMCommon(clusterName),
+ clusterName: clusterName,
+ backend: backend,
+ }
+
+ mgr.store = factory.NewWatchStore(clusterName, KVPairCreator, &mgr)
+ return &mgr
+}
+
+// Run starts the manager, blocking until the context is closed and all
+// started functions terminated.
+func (mgr *wsmSync) Run(ctx context.Context) {
+ mgr.run()
+ mgr.onUpdate = func(prefix string) { mgr.ready(ctx, prefix) }
+ mgr.store.Watch(ctx, mgr.backend, path.Join(kvstore.SyncedPrefix, mgr.clusterName))
+ mgr.wait()
+}
+
+func (mgr *wsmSync) OnUpdate(k Key) { mgr.onUpdate(k.GetKeyName()) }
+func (mgr *wsmSync) OnDelete(k NamedKey) {}
+
+type wsmImmediate struct {
+ wsmCommon
+}
+
+// NewWatchStoreManagerImmediate implements the WatchStoreManager interface,
+// immediately starting the registered functions once Run() is executed.
+func NewWatchStoreManagerImmediate(clusterName string) WatchStoreManager {
+ return &wsmImmediate{
+ wsmCommon: newWSMCommon(clusterName),
+ }
+}
+
+// Run starts the manager, blocking until the context is closed and all
+// started functions terminated.
+func (mgr *wsmImmediate) Run(ctx context.Context) {
+ mgr.run()
+ for prefix := range mgr.functions {
+ mgr.ready(ctx, prefix)
+ }
+ mgr.wait()
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/trace.go b/vendor/github.com/cilium/cilium/pkg/kvstore/trace.go
new file mode 100644
index 000000000..a58f966ba
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/trace.go
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package kvstore
+
+import (
+ "strings"
+
+ "github.com/sirupsen/logrus"
+
+ "github.com/cilium/cilium/pkg/logging"
+)
+
+var (
+ // Debugging can be enabled at compile time with:
+ // -ldflags "-X "github.com/cilium/cilium/pkg/kvstore".Debug=true"
+ Debug string
+
+ traceEnabled bool
+)
+
+// EnableTracing enables kvstore tracing
+func EnableTracing() {
+ traceEnabled = true
+}
+
+// Trace is used to trace kvstore debug messages
+func Trace(format string, err error, fields logrus.Fields, a ...interface{}) {
+ if traceEnabled {
+ log.WithError(err).WithFields(fields).Debugf(format)
+ }
+}
+
+func init() {
+ if strings.ToLower(Debug) == "true" {
+ logging.DefaultLogger.SetLevel(logrus.DebugLevel)
+ traceEnabled = true
+ }
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/kvstore/watcher_cache.go b/vendor/github.com/cilium/cilium/pkg/kvstore/watcher_cache.go
new file mode 100644
index 000000000..a3bf7cbf5
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/kvstore/watcher_cache.go
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package kvstore
+
+type watchState struct {
+ deletionMark bool
+}
+
+type watcherCache map[string]watchState
+
+func (wc watcherCache) Exists(key []byte) bool {
+ if _, ok := wc[string(key)]; ok {
+ return true
+ }
+
+ return false
+}
+
+func (wc watcherCache) RemoveDeleted(f func(string)) {
+ for k, localKey := range wc {
+ if localKey.deletionMark {
+ f(k)
+ delete(wc, k)
+ }
+ }
+}
+
+func (wc watcherCache) MarkAllForDeletion() {
+ for k := range wc {
+ wc[k] = watchState{deletionMark: true}
+ }
+}
+
+func (wc watcherCache) MarkInUse(key []byte) {
+ wc[string(key)] = watchState{deletionMark: false}
+}
+
+func (wc watcherCache) RemoveKey(key []byte) {
+ delete(wc, string(key))
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/labels/arraylist.go b/vendor/github.com/cilium/cilium/pkg/labels/arraylist.go
index f08ec7938..11e5669ec 100644
--- a/vendor/github.com/cilium/cilium/pkg/labels/arraylist.go
+++ b/vendor/github.com/cilium/cilium/pkg/labels/arraylist.go
@@ -57,36 +57,35 @@ func (ls LabelArrayList) Sort() LabelArrayList {
}
// Merge incorporates new LabelArrays into an existing LabelArrayList, without
-// introducing duplicates, returning the result for convenience. The LabelArrays
-// contained in either LabelArrayList must already be sorted. Existing
+// introducing duplicates, returning the result for convenience. Existing
// duplication in either list is not removed.
func (lsp *LabelArrayList) Merge(include ...LabelArray) LabelArrayList {
lsp.Sort()
incl := LabelArrayList(include).Sort()
- return lsp.mergeSorted(incl)
+ return lsp.MergeSorted(incl)
}
-func (lsp *LabelArrayList) mergeSorted(include LabelArrayList) LabelArrayList {
- ls := *lsp
- merged := make(LabelArrayList, 0, len(include)+len(ls))
-
- var i, j int
- for i < len(include) && j < len(ls) {
- if ls[j].Less(include[i]) {
- merged = append(merged, ls[j])
- j++
- } else if ls[j].Equals(include[i]) {
- merged = append(merged, ls[j])
+// MergeSorted incorporates new labels from 'include' to the receiver,
+// both of which must be already sorted.
+// LabelArrays are inserted from 'include' to the receiver as needed.
+func (lsp *LabelArrayList) MergeSorted(include LabelArrayList) LabelArrayList {
+ merged := *lsp
+ i := 0
+ for j := 0; i < len(include) && j < len(merged); j++ {
+ if include[i].Less(merged[j]) {
+ merged = append(merged[:j+1], merged[j:]...) // make space at merged[j]
+ merged[j] = include[i]
i++
- j++
- } else {
- merged = append(merged, include[i])
+ } else if include[i].Equals(merged[j]) {
i++
}
}
- merged = append(merged, ls[j:]...)
- merged = append(merged, include[i:]...)
+ // 'include' may have more entries after original labels have been exhausted
+ if i < len(include) {
+ merged = append(merged, include[i:]...)
+ }
+
*lsp = merged
- return merged
+ return *lsp
}
diff --git a/vendor/github.com/cilium/cilium/pkg/labels/cidr/cidr.go b/vendor/github.com/cilium/cilium/pkg/labels/cidr/cidr.go
new file mode 100644
index 000000000..13410e9cb
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/labels/cidr/cidr.go
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package cidr
+
+import (
+ "fmt"
+ "net/netip"
+ "strconv"
+ "strings"
+
+ "github.com/cilium/cilium/pkg/labels"
+ "github.com/cilium/cilium/pkg/option"
+)
+
+// maskedIPToLabelString is the base method for serializing an IP + prefix into
+// a string that can be used for creating Labels and EndpointSelector objects.
+//
+// For IPv6 addresses, it converts ":" into "-" as EndpointSelectors don't
+// support colons inside the name section of a label.
+func maskedIPToLabelString(ip netip.Addr, prefix int) string {
+ ipStr := ip.String()
+ ipNoColons := strings.Replace(ipStr, ":", "-", -1)
+
+ // EndpointSelector keys can't start or end with a "-", so insert a
+ // zero at the start or end if it would otherwise have a "-" at that
+ // position.
+ preZero := ""
+ postZero := ""
+ if ipNoColons[0] == '-' {
+ preZero = "0"
+ }
+ if ipNoColons[len(ipNoColons)-1] == '-' {
+ postZero = "0"
+ }
+ var str strings.Builder
+ str.Grow(
+ len(labels.LabelSourceCIDR) +
+ len(preZero) +
+ len(ipNoColons) +
+ len(postZero) +
+ 2 /*len of prefix*/ +
+ 2, /* ':' '/' */
+ )
+ str.WriteString(labels.LabelSourceCIDR)
+ str.WriteRune(':')
+ str.WriteString(preZero)
+ str.WriteString(ipNoColons)
+ str.WriteString(postZero)
+ str.WriteRune('/')
+ str.WriteString(strconv.Itoa(prefix))
+ return str.String()
+}
+
+// IPStringToLabel parses a string and returns it as a CIDR label.
+//
+// If ip is not a valid IP address or CIDR Prefix, returns an error.
+func IPStringToLabel(ip string) (labels.Label, error) {
+ var lblString string
+ // factored out of netip.ParsePrefix to avoid allocating an empty netip.Prefix in case it's
+ // an IP and not a CIDR.
+ i := strings.LastIndexByte(ip, '/')
+ if i < 0 {
+ parsedIP, err := netip.ParseAddr(ip)
+ if err != nil {
+ return labels.Label{}, fmt.Errorf("%q is not an IP address: %w", ip, err)
+ }
+ lblString = maskedIPToLabelString(parsedIP, parsedIP.BitLen())
+ } else {
+ parsedPrefix, err := netip.ParsePrefix(ip)
+ if err != nil {
+ return labels.Label{}, fmt.Errorf("%q is not a CIDR: %w", ip, err)
+ }
+ lblString = maskedIPToLabelString(parsedPrefix.Masked().Addr(), parsedPrefix.Bits())
+ }
+ return labels.ParseLabel(lblString), nil
+}
+
+// GetCIDRLabels turns a CIDR into a set of labels representing the cidr itself
+// and all broader CIDRS which include the specified CIDR in them. For example:
+// CIDR: 10.0.0.0/8 =>
+//
+// "cidr:10.0.0.0/8", "cidr:10.0.0.0/7", "cidr:8.0.0.0/6",
+// "cidr:8.0.0.0/5", "cidr:0.0.0.0/4, "cidr:0.0.0.0/3",
+// "cidr:0.0.0.0/2", "cidr:0.0.0.0/1", "cidr:0.0.0.0/0"
+//
+// The identity reserved:world is always added as it includes any CIDR.
+func GetCIDRLabels(prefix netip.Prefix) labels.Labels {
+ ones := prefix.Bits()
+ result := make([]string, 0, ones+2)
+
+ // If ones is zero, then it's the default CIDR prefix /0 which should
+ // just be regarded as reserved:world. In all other cases, we need
+ // to generate the set of prefixes starting from the /0 up to the
+ // specified prefix length.
+ if ones > 0 {
+ ip := prefix.Addr()
+ for i := 0; i <= ones; i++ {
+ p := netip.PrefixFrom(ip, i)
+ label := maskedIPToLabelString(p.Masked().Addr(), i)
+ result = append(result, label)
+ }
+ }
+
+ if option.Config.IsDualStack() {
+ if prefix.Addr().Is4() {
+ result = append(result, labels.LabelSourceReserved+":"+labels.IDNameWorldIPv4)
+ } else {
+ result = append(result, labels.LabelSourceReserved+":"+labels.IDNameWorldIPv6)
+ }
+ } else {
+ result = append(result, labels.LabelSourceReserved+":"+labels.IDNameWorld)
+ }
+
+ return labels.NewLabelsFromModel(result)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/labels/cidr/doc.go b/vendor/github.com/cilium/cilium/pkg/labels/cidr/doc.go
new file mode 100644
index 000000000..f97bd9a51
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/labels/cidr/doc.go
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Package cidr provides helper methods for generating labels for CIDRs which
+// are partially derived from node state.
+package cidr
diff --git a/vendor/github.com/cilium/cilium/pkg/labels/labels.go b/vendor/github.com/cilium/cilium/pkg/labels/labels.go
index 1eaf46e47..d130ca357 100644
--- a/vendor/github.com/cilium/cilium/pkg/labels/labels.go
+++ b/vendor/github.com/cilium/cilium/pkg/labels/labels.go
@@ -26,6 +26,14 @@ const (
// IDNameWorld is the label used for the world ID.
IDNameWorld = "world"
+ // IDNameWorldIPv4 is the label used for the world-ipv4 ID, to distinguish
+ // it from world-ipv6 in dual-stack mode.
+ IDNameWorldIPv4 = "world-ipv4"
+
+ // IDNameWorldIPv6 is the label used for the world-ipv6 ID, to distinguish
+ // it from world-ipv4 in dual-stack mode.
+ IDNameWorldIPv6 = "world-ipv6"
+
// IDNameCluster is the label used to identify an unspecified endpoint
// inside the cluster
IDNameCluster = "cluster"
@@ -69,6 +77,12 @@ var (
// LabelWorld is the label used for world.
LabelWorld = Labels{IDNameWorld: NewLabel(IDNameWorld, "", LabelSourceReserved)}
+ // LabelWorldIPv4 is the label used for world-ipv4.
+ LabelWorldIPv4 = Labels{IDNameWorldIPv4: NewLabel(IDNameWorldIPv4, "", LabelSourceReserved)}
+
+ // LabelWorldIPv6 is the label used for world-ipv6.
+ LabelWorldIPv6 = Labels{IDNameWorldIPv6: NewLabel(IDNameWorldIPv6, "", LabelSourceReserved)}
+
// LabelRemoteNode is the label used for remote nodes.
LabelRemoteNode = Labels{IDNameRemoteNode: NewLabel(IDNameRemoteNode, "", LabelSourceReserved)}
@@ -469,13 +483,24 @@ func (l Label) FormatForKVStore() []byte {
// kvstore.prefixMatchesKey())
b := make([]byte, 0, len(l.Source)+len(l.Key)+len(l.Value)+3)
buf := bytes.NewBuffer(b)
+ l.formatForKVStoreInto(buf)
+ return buf.Bytes()
+}
+
+// formatForKVStoreInto writes the label as a formatted string, ending in
+// a semicolon into buf.
+//
+// DO NOT BREAK THE FORMAT OF THIS. THE FORMATTED STRING IS USED AS
+// PART OF THE KEY IN THE KEY-VALUE STORE.
+//
+// Non-pointer receiver allows this to be called on a value in a map.
+func (l Label) formatForKVStoreInto(buf *bytes.Buffer) {
buf.WriteString(l.Source)
buf.WriteRune(':')
buf.WriteString(l.Key)
buf.WriteRune('=')
buf.WriteString(l.Value)
buf.WriteRune(';')
- return buf.Bytes()
}
// SortedList returns the labels as a sorted list, separated by semicolon
@@ -489,10 +514,21 @@ func (l Labels) SortedList() []byte {
}
sort.Strings(keys)
- b := make([]byte, 0, len(keys)*2)
+ // Labels can have arbitrary size. However, when many CIDR identities are in
+ // the system, for example due to a FQDN policy matching S3, CIDR labels
+ // dominate in number. IPv4 CIDR labels in serialized form are max 25 bytes
+ // long. Allocate slightly more to avoid having a realloc if there's some
+ // other labels which may be longer, since the cost of allocating a few bytes
+ // more is dominated by a second allocation, especially since these
+ // allocations are short-lived.
+ //
+ // cidr:123.123.123.123/32=;
+ // 0 1 2
+ // 1234567890123456789012345
+ b := make([]byte, 0, len(keys)*30)
buf := bytes.NewBuffer(b)
for _, k := range keys {
- buf.Write(l[k].FormatForKVStore())
+ l[k].formatForKVStoreInto(buf)
}
return buf.Bytes()
diff --git a/vendor/github.com/cilium/cilium/pkg/loadbalancer/doc.go b/vendor/github.com/cilium/cilium/pkg/loadbalancer/doc.go
new file mode 100644
index 000000000..997e3f7f1
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/loadbalancer/doc.go
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// +k8s:deepcopy-gen=package,register
+
+// Package loadbalancer contains all logic related with the loadbalancer
+// +groupName=pkg
+package loadbalancer
diff --git a/vendor/github.com/cilium/cilium/pkg/loadbalancer/loadbalancer.go b/vendor/github.com/cilium/cilium/pkg/loadbalancer/loadbalancer.go
new file mode 100644
index 000000000..29b7f4280
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/loadbalancer/loadbalancer.go
@@ -0,0 +1,820 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package loadbalancer
+
+import (
+ "fmt"
+ "net"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/cilium/cilium/api/v1/models"
+ "github.com/cilium/cilium/pkg/cidr"
+ cmtypes "github.com/cilium/cilium/pkg/clustermesh/types"
+ "github.com/cilium/cilium/pkg/option"
+)
+
+// SVCType is a type of a service.
+type SVCType string
+
+const (
+ SVCTypeNone = SVCType("NONE")
+ SVCTypeHostPort = SVCType("HostPort")
+ SVCTypeClusterIP = SVCType("ClusterIP")
+ SVCTypeNodePort = SVCType("NodePort")
+ SVCTypeExternalIPs = SVCType("ExternalIPs")
+ SVCTypeLoadBalancer = SVCType("LoadBalancer")
+ SVCTypeLocalRedirect = SVCType("LocalRedirect")
+)
+
+// SVCTrafficPolicy defines which backends are chosen
+type SVCTrafficPolicy string
+
+const (
+ SVCTrafficPolicyNone = SVCTrafficPolicy("NONE")
+ SVCTrafficPolicyCluster = SVCTrafficPolicy("Cluster")
+ SVCTrafficPolicyLocal = SVCTrafficPolicy("Local")
+)
+
+// SVCNatPolicy defines whether we need NAT46/64 translation for backends
+type SVCNatPolicy string
+
+const (
+ SVCNatPolicyNone = SVCNatPolicy("NONE")
+ SVCNatPolicyNat46 = SVCNatPolicy("Nat46")
+ SVCNatPolicyNat64 = SVCNatPolicy("Nat64")
+)
+
+// ServiceFlags is the datapath representation of the service flags that can be
+// used (lb{4,6}_service.flags)
+type ServiceFlags uint16
+
+const (
+ serviceFlagNone = 0
+ serviceFlagExternalIPs = 1 << 0
+ serviceFlagNodePort = 1 << 1
+ serviceFlagExtLocalScope = 1 << 2
+ serviceFlagHostPort = 1 << 3
+ serviceFlagSessionAffinity = 1 << 4
+ serviceFlagLoadBalancer = 1 << 5
+ serviceFlagRoutable = 1 << 6
+ serviceFlagSourceRange = 1 << 7
+ serviceFlagLocalRedirect = 1 << 8
+ serviceFlagNat46x64 = 1 << 9
+ serviceFlagL7LoadBalancer = 1 << 10
+ serviceFlagLoopback = 1 << 11
+ serviceFlagIntLocalScope = 1 << 12
+ serviceFlagTwoScopes = 1 << 13
+)
+
+type SvcFlagParam struct {
+ SvcType SVCType
+ SvcNatPolicy SVCNatPolicy
+ SvcExtLocal bool
+ SvcIntLocal bool
+ SessionAffinity bool
+ IsRoutable bool
+ CheckSourceRange bool
+ L7LoadBalancer bool
+ LoopbackHostport bool
+}
+
+// NewSvcFlag creates service flag
+func NewSvcFlag(p *SvcFlagParam) ServiceFlags {
+ var flags ServiceFlags
+
+ switch p.SvcType {
+ case SVCTypeExternalIPs:
+ flags |= serviceFlagExternalIPs
+ case SVCTypeNodePort:
+ flags |= serviceFlagNodePort
+ case SVCTypeLoadBalancer:
+ flags |= serviceFlagLoadBalancer
+ case SVCTypeHostPort:
+ flags |= serviceFlagHostPort
+ if p.LoopbackHostport {
+ flags |= serviceFlagLoopback
+ }
+ case SVCTypeLocalRedirect:
+ flags |= serviceFlagLocalRedirect
+ }
+
+ switch p.SvcNatPolicy {
+ case SVCNatPolicyNat46:
+ fallthrough
+ case SVCNatPolicyNat64:
+ flags |= serviceFlagNat46x64
+ }
+
+ if p.SvcExtLocal {
+ flags |= serviceFlagExtLocalScope
+ }
+ if p.SvcIntLocal {
+ flags |= serviceFlagIntLocalScope
+ }
+ if p.SessionAffinity {
+ flags |= serviceFlagSessionAffinity
+ }
+ if p.IsRoutable {
+ flags |= serviceFlagRoutable
+ }
+ if p.CheckSourceRange {
+ flags |= serviceFlagSourceRange
+ }
+ if p.L7LoadBalancer {
+ flags |= serviceFlagL7LoadBalancer
+ }
+ if p.SvcExtLocal != p.SvcIntLocal && p.SvcType != SVCTypeClusterIP {
+ flags |= serviceFlagTwoScopes
+ }
+
+ return flags
+}
+
+// SVCType returns a service type from the flags
+func (s ServiceFlags) SVCType() SVCType {
+ switch {
+ case s&serviceFlagExternalIPs != 0:
+ return SVCTypeExternalIPs
+ case s&serviceFlagNodePort != 0:
+ return SVCTypeNodePort
+ case s&serviceFlagLoadBalancer != 0:
+ return SVCTypeLoadBalancer
+ case s&serviceFlagHostPort != 0:
+ return SVCTypeHostPort
+ case s&serviceFlagLocalRedirect != 0:
+ return SVCTypeLocalRedirect
+ default:
+ return SVCTypeClusterIP
+ }
+}
+
+// SVCExtTrafficPolicy returns a service traffic policy from the flags
+func (s ServiceFlags) SVCExtTrafficPolicy() SVCTrafficPolicy {
+ switch {
+ case s&serviceFlagExtLocalScope != 0:
+ return SVCTrafficPolicyLocal
+ default:
+ return SVCTrafficPolicyCluster
+ }
+}
+
+// SVCIntTrafficPolicy returns a service traffic policy from the flags
+func (s ServiceFlags) SVCIntTrafficPolicy() SVCTrafficPolicy {
+ switch {
+ case s&serviceFlagIntLocalScope != 0:
+ return SVCTrafficPolicyLocal
+ default:
+ return SVCTrafficPolicyCluster
+ }
+}
+
+// SVCNatPolicy returns a service NAT policy from the flags
+func (s ServiceFlags) SVCNatPolicy(fe L3n4Addr) SVCNatPolicy {
+ if s&serviceFlagNat46x64 == 0 {
+ return SVCNatPolicyNone
+ }
+
+ if fe.IsIPv6() {
+ return SVCNatPolicyNat64
+ } else {
+ return SVCNatPolicyNat46
+ }
+}
+
+// String returns the string implementation of ServiceFlags.
+func (s ServiceFlags) String() string {
+ var str []string
+
+ str = append(str, string(s.SVCType()))
+ if s&serviceFlagExtLocalScope != 0 {
+ str = append(str, string(SVCTrafficPolicyLocal))
+ }
+ if s&serviceFlagIntLocalScope != 0 {
+ str = append(str, "Internal"+string(SVCTrafficPolicyLocal))
+ }
+ if s&serviceFlagTwoScopes != 0 {
+ str = append(str, "two-scopes")
+ }
+ if s&serviceFlagSessionAffinity != 0 {
+ str = append(str, "sessionAffinity")
+ }
+ if s&serviceFlagRoutable == 0 {
+ str = append(str, "non-routable")
+ }
+ if s&serviceFlagSourceRange != 0 {
+ str = append(str, "check source-range")
+ }
+ if s&serviceFlagNat46x64 != 0 {
+ str = append(str, "46x64")
+ }
+ if s&serviceFlagL7LoadBalancer != 0 {
+ str = append(str, "l7-load-balancer")
+ }
+ if s&serviceFlagLoopback != 0 {
+ str = append(str, "loopback")
+ }
+
+ return strings.Join(str, ", ")
+}
+
+// UInt16 returns the uint16 representation of the ServiceFlags.
+func (s ServiceFlags) UInt16() uint16 {
+ return uint16(s)
+}
+
+const (
+ NONE = L4Type("NONE")
+ // TCP type.
+ TCP = L4Type("TCP")
+ // UDP type.
+ UDP = L4Type("UDP")
+ // SCTP type.
+ SCTP = L4Type("SCTP")
+)
+
+const (
+ // ScopeExternal is the lookup scope for services from outside the node.
+ ScopeExternal uint8 = iota
+ // ScopeInternal is the lookup scope for services from inside the node.
+ ScopeInternal
+)
+
+// BackendState tracks backend's ability to load-balance service traffic.
+//
+// Valid transition states for a backend -
+// BackendStateActive -> BackendStateTerminating, BackendStateQuarantined, BackendStateMaintenance
+// BackendStateTerminating -> No valid state transition
+// BackendStateQuarantined -> BackendStateActive, BackendStateTerminating
+// BackendStateMaintenance -> BackendStateActive
+//
+// Sources setting the states -
+// BackendStateActive - Kubernetes events, service API
+// BackendStateTerminating - Kubernetes events
+// BackendStateQuarantined - service API
+// BackendStateMaintenance - service API
+const (
+ // BackendStateActive refers to the backend state when it's available for
+ // load-balancing traffic. It's the default state for a backend.
+ // Backends in this state can be health-checked.
+ BackendStateActive BackendState = iota
+ // BackendStateTerminating refers to the terminating backend state so that
+ // it can be gracefully removed.
+ // Backends in this state won't be health-checked.
+ BackendStateTerminating
+ // BackendStateQuarantined refers to the backend state when it's unreachable,
+ // and will not be selected for load-balancing traffic.
+ // Backends in this state can be health-checked.
+ BackendStateQuarantined
+ // BackendStateMaintenance refers to the backend state where the backend
+ // is put under maintenance, and will neither be selected for load-balancing
+ // traffic nor be health-checked.
+ BackendStateMaintenance
+ // BackendStateInvalid is an invalid state, and is used to report error conditions.
+ // Keep this as the last entry.
+ BackendStateInvalid
+)
+
+// BackendStateFlags is the datapath representation of the backend flags that
+// are used in (lb{4,6}_backend.flags) to store backend state.
+type BackendStateFlags = uint8
+
+// Datapath flag values mirroring the BackendState* constants above; the
+// iota ordering must stay in sync with the BPF side.
+const (
+ // BackendStateActiveFlag is the datapath flag for BackendStateActive.
+ BackendStateActiveFlag = iota
+ // BackendStateTerminatingFlag is the datapath flag for BackendStateTerminating.
+ BackendStateTerminatingFlag
+ // BackendStateQuarantinedFlag is the datapath flag for BackendStateQuarantined.
+ BackendStateQuarantinedFlag
+ // BackendStateMaintenanceFlag is the datapath flag for BackendStateMaintenance.
+ BackendStateMaintenanceFlag
+)
+
+// NewBackendFlags converts a BackendState into its datapath flags
+// representation. States not covered by the switch (including
+// BackendStateInvalid) map to the zero value, BackendStateActiveFlag.
+func NewBackendFlags(state BackendState) BackendStateFlags {
+ var flags BackendStateFlags
+
+ switch state {
+ case BackendStateActive:
+ flags = BackendStateActiveFlag
+ case BackendStateTerminating:
+ flags = BackendStateTerminatingFlag
+ case BackendStateQuarantined:
+ flags = BackendStateQuarantinedFlag
+ case BackendStateMaintenance:
+ flags = BackendStateMaintenanceFlag
+ }
+
+ return flags
+}
+
+// GetBackendStateFromFlags converts datapath backend flags back into a
+// BackendState. Unrecognized flag values default to BackendStateActive.
+func GetBackendStateFromFlags(flags uint8) BackendState {
+ switch flags {
+ case BackendStateTerminatingFlag:
+ return BackendStateTerminating
+ case BackendStateQuarantinedFlag:
+ return BackendStateQuarantined
+ case BackendStateMaintenanceFlag:
+ return BackendStateMaintenance
+ default:
+ return BackendStateActive
+ }
+}
+
+// DefaultBackendWeight is used when backend weight is not set in ServiceSpec
+const DefaultBackendWeight = 100
+
+var (
+ // AllProtocols is the list of all supported L4 protocols
+ AllProtocols = []L4Type{TCP, UDP, SCTP}
+)
+
+// L4Type name. An alias (not a defined type), so plain strings convert
+// implicitly.
+type L4Type = string
+
+// FEPortName is the name of the frontend's port.
+type FEPortName string
+
+// ServiceID is the service's ID.
+type ServiceID uint16
+
+// ServiceName represents the fully-qualified reference to the service by name,
+// including both the namespace and name of the service (and optionally the cluster).
+type ServiceName struct {
+ Namespace string
+ Name string
+ Cluster string
+}
+
+// String returns "cluster/namespace/name" when a cluster is set, and
+// "namespace/name" otherwise.
+func (n ServiceName) String() string {
+ if n.Cluster != "" {
+ return n.Cluster + "/" + n.Namespace + "/" + n.Name
+ }
+
+ return n.Namespace + "/" + n.Name
+}
+
+// BackendID is the backend's ID.
+type BackendID uint32
+
+// ID is the ID of L3n4Addr endpoint (either service or backend).
+type ID uint32
+
+// BackendState is the state of a backend for load-balancing service traffic.
+type BackendState uint8
+
+// Preferred indicates if this backend is preferred to be load balanced.
+type Preferred bool
+
+// Backend represents load balancer backend.
+type Backend struct {
+ // FEPortName is the frontend port name. This is used to filter backends sending to EDS.
+ FEPortName string
+ // ID of the backend
+ ID BackendID
+ // Weight of backend
+ Weight uint16
+ // Node hosting this backend. This is used to determine backends local to
+ // a node.
+ NodeName string
+ // L3n4Addr is embedded: the backend's address/port are accessed directly
+ // on the Backend.
+ L3n4Addr
+ // State of the backend for load-balancing service traffic
+ State BackendState
+ // Preferred indicates if the healthy backend is preferred
+ Preferred Preferred
+}
+
+// String returns the backend's address in the embedded L3n4Addr's string
+// form; ID, state and weight are not included.
+func (b *Backend) String() string {
+ return b.L3n4Addr.String()
+}
+
+// SVC is a structure for storing service details.
+type SVC struct {
+ Frontend L3n4AddrID // SVC frontend addr and an allocated ID
+ Backends []*Backend // List of service backends
+ Type SVCType // Service type
+ ExtTrafficPolicy SVCTrafficPolicy // Service external traffic policy
+ IntTrafficPolicy SVCTrafficPolicy // Service internal traffic policy
+ NatPolicy SVCNatPolicy // Service NAT 46/64 policy
+ SessionAffinity bool // Whether client session affinity is enabled
+ SessionAffinityTimeoutSec uint32 // Session affinity timeout in seconds
+ HealthCheckNodePort uint16 // Service health check node port
+ Name ServiceName // Fully qualified service name
+ LoadBalancerSourceRanges []*cidr.CIDR // Allowed source CIDRs for LoadBalancer services
+ L7LBProxyPort uint16 // Non-zero for L7 LB services
+ L7LBFrontendPorts []string // Non-zero for L7 LB frontend service ports
+ LoopbackHostport bool // Whether this is a loopback hostPort service
+}
+
+// GetModel converts the SVC into its API model representation. A nil
+// receiver yields a nil model. Backends are emitted sorted by backend ID so
+// the output is deterministic.
+func (s *SVC) GetModel() *models.Service {
+ var natPolicy string
+ type backendPlacement struct {
+ pos int
+ id BackendID
+ }
+
+ if s == nil {
+ return nil
+ }
+
+ id := int64(s.Frontend.ID)
+ // NatPolicy is only reported when a NAT 46/64 policy is actually set.
+ if s.NatPolicy != SVCNatPolicyNone {
+ natPolicy = string(s.NatPolicy)
+ }
+ spec := &models.ServiceSpec{
+ ID: id,
+ FrontendAddress: s.Frontend.GetModel(),
+ BackendAddresses: make([]*models.BackendAddress, len(s.Backends)),
+ Flags: &models.ServiceSpecFlags{
+ Type: string(s.Type),
+ // TrafficPolicy duplicates ExtTrafficPolicy — presumably kept
+ // for backward API compatibility; verify against API consumers.
+ TrafficPolicy: string(s.ExtTrafficPolicy),
+ ExtTrafficPolicy: string(s.ExtTrafficPolicy),
+ IntTrafficPolicy: string(s.IntTrafficPolicy),
+ NatPolicy: natPolicy,
+ HealthCheckNodePort: s.HealthCheckNodePort,
+
+ Name: s.Name.Name,
+ Namespace: s.Name.Namespace,
+ },
+ }
+
+ // Only surface the cluster when the service comes from a remote cluster.
+ if s.Name.Cluster != option.Config.ClusterName {
+ spec.Flags.Cluster = s.Name.Cluster
+ }
+
+ // Sort backend positions by ID, then emit models in that order.
+ placements := make([]backendPlacement, len(s.Backends))
+ for i, be := range s.Backends {
+ placements[i] = backendPlacement{pos: i, id: be.ID}
+ }
+ sort.Slice(placements,
+ func(i, j int) bool { return placements[i].id < placements[j].id })
+ for i, placement := range placements {
+ spec.BackendAddresses[i] = s.Backends[placement.pos].GetBackendModel()
+ }
+
+ return &models.Service{
+ Spec: spec,
+ Status: &models.ServiceStatus{
+ // Realized deliberately aliases Spec: desired == realized here.
+ Realized: spec,
+ },
+ }
+}
+
+// IsValidStateTransition reports whether a backend may move from state old
+// to state new, per the transition table documented on BackendState.
+// Self-transitions are always valid; transitions into BackendStateInvalid
+// never are.
+func IsValidStateTransition(old, new BackendState) bool {
+ if old == new {
+ return true
+ }
+ if new == BackendStateInvalid {
+ return false
+ }
+
+ switch old {
+ case BackendStateActive:
+ // Intentionally empty: Active may transition to any valid state.
+ case BackendStateTerminating:
+ return false
+ case BackendStateQuarantined:
+ if new == BackendStateMaintenance {
+ return false
+ }
+ case BackendStateMaintenance:
+ if new != BackendStateActive {
+ return false
+ }
+ default:
+ return false
+ }
+ return true
+}
+
+// GetBackendState parses an API backend state string (case-insensitive)
+// into a BackendState. The empty string maps to BackendStateActive; any
+// other unrecognized value yields BackendStateInvalid and an error.
+func GetBackendState(state string) (BackendState, error) {
+ switch strings.ToLower(state) {
+ case models.BackendAddressStateActive, "":
+ return BackendStateActive, nil
+ case models.BackendAddressStateTerminating:
+ return BackendStateTerminating, nil
+ case models.BackendAddressStateQuarantined:
+ return BackendStateQuarantined, nil
+ case models.BackendAddressStateMaintenance:
+ return BackendStateMaintenance, nil
+ default:
+ return BackendStateInvalid, fmt.Errorf("invalid backend state %s", state)
+ }
+}
+
+// String returns the API model string for the state. Note this is not a
+// fmt.Stringer (it also returns an error, for unknown states such as
+// BackendStateInvalid).
+func (state BackendState) String() (string, error) {
+ switch state {
+ case BackendStateActive:
+ return models.BackendAddressStateActive, nil
+ case BackendStateTerminating:
+ return models.BackendAddressStateTerminating, nil
+ case BackendStateQuarantined:
+ return models.BackendAddressStateQuarantined, nil
+ case BackendStateMaintenance:
+ return models.BackendAddressStateMaintenance, nil
+ default:
+ return "", fmt.Errorf("invalid backend state %d", state)
+ }
+}
+
+// IsValidBackendState reports whether the given API state string parses to
+// a valid BackendState (the empty string counts as valid/active).
+func IsValidBackendState(state string) bool {
+ _, err := GetBackendState(state)
+
+ return err == nil
+}
+
+// NewL4Type parses a case-insensitive protocol name into an L4Type.
+// NOTE(review): the error omits the offending name — consider including it
+// upstream.
+func NewL4Type(name string) (L4Type, error) {
+ switch strings.ToLower(name) {
+ case "tcp":
+ return TCP, nil
+ case "udp":
+ return UDP, nil
+ case "sctp":
+ return SCTP, nil
+ default:
+ return "", fmt.Errorf("unknown L4 protocol")
+ }
+}
+
+// L4Addr is an abstraction for the backend port with a L4Type, usually tcp or udp, and
+// the Port number.
+//
+// +deepequal-gen=true
+// +deepequal-gen:private-method=true
+type L4Addr struct {
+ Protocol L4Type
+ Port uint16
+}
+
+// DeepEqual returns true if both the receiver and 'o' are deeply equal.
+// Nil receivers are handled here; the generated deepEqual assumes non-nil.
+func (l *L4Addr) DeepEqual(o *L4Addr) bool {
+ if l == nil {
+ return o == nil
+ }
+ return l.deepEqual(o)
+}
+
+// NewL4Addr creates a new L4Addr.
+func NewL4Addr(protocol L4Type, number uint16) *L4Addr {
+ return &L4Addr{Protocol: protocol, Port: number}
+}
+
+// L3n4Addr is used to store, as an unique L3+L4 address in the KVStore. It also
+// includes the lookup scope for frontend addresses which is used in service
+// handling for externalTrafficPolicy=Local and internalTrafficPolicy=Local,
+// that is, Scope{External,Internal}.
+//
+// +deepequal-gen=true
+// +deepequal-gen:private-method=true
+type L3n4Addr struct {
+ AddrCluster cmtypes.AddrCluster
+ L4Addr
+ Scope uint8
+}
+
+// DeepEqual returns true if both the receiver and 'o' are deeply equal.
+// AddrCluster is compared explicitly here in addition to the generated
+// deepEqual.
+func (l *L3n4Addr) DeepEqual(o *L3n4Addr) bool {
+ if l == nil {
+ return o == nil
+ }
+ return l.AddrCluster.Equal(o.AddrCluster) && l.deepEqual(o)
+}
+
+// NewL3n4Addr creates a new L3n4Addr.
+func NewL3n4Addr(protocol L4Type, addrCluster cmtypes.AddrCluster, portNumber uint16, scope uint8) *L3n4Addr {
+ lbport := NewL4Addr(protocol, portNumber)
+
+ addr := L3n4Addr{AddrCluster: addrCluster, L4Addr: *lbport, Scope: scope}
+
+ return &addr
+}
+
+// NewL3n4AddrFromModel builds an L3n4Addr from an API frontend address
+// model. A nil model yields (nil, nil). The IP is mandatory; an empty
+// protocol defaults to NONE; the scope string must be either the external
+// or internal model constant.
+func NewL3n4AddrFromModel(base *models.FrontendAddress) (*L3n4Addr, error) {
+ var scope uint8
+
+ if base == nil {
+ return nil, nil
+ }
+
+ if base.IP == "" {
+ return nil, fmt.Errorf("missing IP address")
+ }
+
+ proto := NONE
+ if base.Protocol != "" {
+ p, err := NewL4Type(base.Protocol)
+ if err != nil {
+ return nil, err
+ }
+ proto = p
+ }
+
+ l4addr := NewL4Addr(proto, base.Port)
+ addrCluster, err := cmtypes.ParseAddrCluster(base.IP)
+ if err != nil {
+ return nil, err
+ }
+
+ if base.Scope == models.FrontendAddressScopeExternal {
+ scope = ScopeExternal
+ } else if base.Scope == models.FrontendAddressScopeInternal {
+ scope = ScopeInternal
+ } else {
+ return nil, fmt.Errorf("invalid scope \"%s\"", base.Scope)
+ }
+
+ return &L3n4Addr{AddrCluster: addrCluster, L4Addr: *l4addr, Scope: scope}, nil
+}
+
+// NewBackend creates the Backend struct instance from given params.
+// The default state for the returned Backend is BackendStateActive.
+func NewBackend(id BackendID, protocol L4Type, addrCluster cmtypes.AddrCluster, portNumber uint16) *Backend {
+ lbport := NewL4Addr(protocol, portNumber)
+ b := Backend{
+ ID: id,
+ L3n4Addr: L3n4Addr{AddrCluster: addrCluster, L4Addr: *lbport},
+ State: BackendStateActive,
+ Preferred: Preferred(false),
+ Weight: DefaultBackendWeight,
+ }
+
+ return &b
+}
+
+// NewBackendWithState creates the Backend struct instance from given params.
+// Unlike NewBackend, Preferred is left at its zero value (false).
+func NewBackendWithState(id BackendID, protocol L4Type, addrCluster cmtypes.AddrCluster, portNumber uint16,
+ state BackendState) *Backend {
+ lbport := NewL4Addr(protocol, portNumber)
+ b := Backend{
+ ID: id,
+ L3n4Addr: L3n4Addr{AddrCluster: addrCluster, L4Addr: *lbport},
+ State: state,
+ Weight: DefaultBackendWeight,
+ }
+
+ return &b
+}
+
+// NewBackendFromBackendModel builds a Backend from an API backend address
+// model. The IP is mandatory; the protocol is always set to NONE. A nil
+// Weight leaves Weight at 0, and a zero weight forces the backend into
+// BackendStateMaintenance.
+func NewBackendFromBackendModel(base *models.BackendAddress) (*Backend, error) {
+ if base.IP == nil {
+ return nil, fmt.Errorf("missing IP address")
+ }
+
+ // FIXME: Should this be NONE ?
+ l4addr := NewL4Addr(NONE, base.Port)
+ addrCluster, err := cmtypes.ParseAddrCluster(*base.IP)
+ if err != nil {
+ return nil, err
+ }
+ state, err := GetBackendState(base.State)
+ if err != nil {
+ return nil, fmt.Errorf("invalid backend state [%s]", base.State)
+ }
+
+ b := &Backend{
+ NodeName: base.NodeName,
+ L3n4Addr: L3n4Addr{AddrCluster: addrCluster, L4Addr: *l4addr},
+ State: state,
+ Preferred: Preferred(base.Preferred),
+ }
+
+ if base.Weight != nil {
+ b.Weight = *base.Weight
+ }
+
+ // A weight of zero means the backend must not receive traffic.
+ if b.Weight == 0 {
+ b.State = BackendStateMaintenance
+ }
+
+ return b, nil
+}
+
+// NewL3n4AddrFromBackendModel builds an L3n4Addr from an API backend
+// address model. The IP is mandatory; protocol is always NONE and scope is
+// left at its zero value (ScopeExternal).
+func NewL3n4AddrFromBackendModel(base *models.BackendAddress) (*L3n4Addr, error) {
+ if base.IP == nil {
+ return nil, fmt.Errorf("missing IP address")
+ }
+
+ // FIXME: Should this be NONE ?
+ l4addr := NewL4Addr(NONE, base.Port)
+ addrCluster, err := cmtypes.ParseAddrCluster(*base.IP)
+ if err != nil {
+ return nil, err
+ }
+ return &L3n4Addr{AddrCluster: addrCluster, L4Addr: *l4addr}, nil
+}
+
+// GetModel converts the L3n4Addr into its API frontend-address model.
+// A nil receiver yields nil. The protocol is not included in the model.
+func (a *L3n4Addr) GetModel() *models.FrontendAddress {
+ if a == nil {
+ return nil
+ }
+
+ scope := models.FrontendAddressScopeExternal
+ if a.Scope == ScopeInternal {
+ scope = models.FrontendAddressScopeInternal
+ }
+ return &models.FrontendAddress{
+ IP: a.AddrCluster.String(),
+ Port: a.Port,
+ Scope: scope,
+ }
+}
+
+// GetBackendModel converts the Backend into its API backend-address model.
+// A nil receiver yields nil. A state that fails to stringify is silently
+// emitted as the empty string.
+func (b *Backend) GetBackendModel() *models.BackendAddress {
+ if b == nil {
+ return nil
+ }
+
+ addrClusterStr := b.AddrCluster.String()
+ stateStr, _ := b.State.String()
+ return &models.BackendAddress{
+ IP: &addrClusterStr,
+ Port: b.Port,
+ NodeName: b.NodeName,
+ State: stateStr,
+ Preferred: bool(b.Preferred),
+ Weight: &b.Weight,
+ }
+}
+
+// String returns the L3n4Addr in the "IPv4:Port[/Scope]" format for IPv4 and
+// "[IPv6]:Port[/Scope]" format for IPv6. The "/i" suffix is appended only
+// for internal scope.
+func (a *L3n4Addr) String() string {
+ var scope string
+ if a.Scope == ScopeInternal {
+ scope = "/i"
+ }
+ if a.IsIPv6() {
+ return "[" + a.AddrCluster.String() + "]:" + strconv.FormatUint(uint64(a.Port), 10) + scope
+ }
+ return a.AddrCluster.String() + ":" + strconv.FormatUint(uint64(a.Port), 10) + scope
+}
+
+// StringWithProtocol returns the L3n4Addr in the "IPv4:Port/Protocol[/Scope]"
+// format for IPv4 and "[IPv6]:Port/Protocol[/Scope]" format for IPv6.
+func (a *L3n4Addr) StringWithProtocol() string {
+ var scope string
+ if a.Scope == ScopeInternal {
+ scope = "/i"
+ }
+ if a.IsIPv6() {
+ return "[" + a.AddrCluster.String() + "]:" + strconv.FormatUint(uint64(a.Port), 10) + "/" + a.Protocol + scope
+ }
+ return a.AddrCluster.String() + ":" + strconv.FormatUint(uint64(a.Port), 10) + "/" + a.Protocol + scope
+}
+
+// StringID returns the L3n4Addr as string to be used for unique identification
+func (a *L3n4Addr) StringID() string {
+ // This does not include the protocol right now as the datapath does
+ // not include the protocol in the lookup of the service IP.
+ return a.String()
+}
+
+// Hash calculates a unique string of the L3n4Addr e.g for use as a key in maps.
+// Note: the resulting string is meant to be used as a key for maps and is not
+// readable by a human eye when printed out.
+func (a L3n4Addr) Hash() string {
+ const lenProto = 0 // proto is omitted for now
+ const lenScope = 1 // scope is uint8 which is an alias for byte
+ const lenPort = 2 // port is uint16 which is 2 bytes
+
+ b := make([]byte, cmtypes.AddrClusterLen+lenProto+lenScope+lenPort)
+ ac20 := a.AddrCluster.As20()
+ copy(b, ac20[:])
+ // FIXME: add Protocol once we care about protocols
+ // scope is a uint8 which is an alias for byte so a cast is safe
+ // NOTE(review): offsets below use net.IPv6len (16) while the buffer is
+ // sized with cmtypes.AddrClusterLen; if AddrClusterLen > 16 the scope
+ // and port overwrite the tail of the 20-byte address encoding — confirm
+ // this matches upstream intent.
+ b[net.IPv6len+lenProto] = byte(a.Scope)
+ // port is a uint16, so 2 bytes
+ b[net.IPv6len+lenProto+lenScope] = byte(a.Port >> 8)
+ b[net.IPv6len+lenProto+lenScope+1] = byte(a.Port & 0xff)
+ return string(b)
+}
+
+// IsIPv6 returns true if the IP address in the given L3n4Addr is IPv6 or not.
+func (a *L3n4Addr) IsIPv6() bool {
+ return a.AddrCluster.Is6()
+}
+
+// L3n4AddrID is used to store, as an unique L3+L4 plus the assigned ID, in the
+// KVStore.
+//
+// +deepequal-gen=true
+// +deepequal-gen:private-method=true
+type L3n4AddrID struct {
+ L3n4Addr
+ ID ID
+}
+
+// DeepEqual returns true if both the receiver and 'o' are deeply equal.
+func (l *L3n4AddrID) DeepEqual(o *L3n4AddrID) bool {
+ if l == nil {
+ return o == nil
+ }
+ return l.deepEqual(o)
+}
+
+// NewL3n4AddrID creates a new L3n4AddrID.
+func NewL3n4AddrID(protocol L4Type, addrCluster cmtypes.AddrCluster, portNumber uint16, scope uint8, id ID) *L3n4AddrID {
+ l3n4Addr := NewL3n4Addr(protocol, addrCluster, portNumber, scope)
+ return &L3n4AddrID{L3n4Addr: *l3n4Addr, ID: id}
+}
+
+// IsIPv6 returns true if the IP address in L3n4Addr's L3n4AddrID is IPv6 or not.
+func (l *L3n4AddrID) IsIPv6() bool {
+ return l.L3n4Addr.IsIPv6()
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/loadbalancer/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/loadbalancer/zz_generated.deepcopy.go
new file mode 100644
index 000000000..45d5cd88c
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/loadbalancer/zz_generated.deepcopy.go
@@ -0,0 +1,157 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package loadbalancer
+
+import (
+ cidr "github.com/cilium/cilium/pkg/cidr"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Backend) DeepCopyInto(out *Backend) {
+ *out = *in
+ in.L3n4Addr.DeepCopyInto(&out.L3n4Addr)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Backend.
+func (in *Backend) DeepCopy() *Backend {
+ if in == nil {
+ return nil
+ }
+ out := new(Backend)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *L3n4Addr) DeepCopyInto(out *L3n4Addr) {
+ *out = *in
+ in.AddrCluster.DeepCopyInto(&out.AddrCluster)
+ out.L4Addr = in.L4Addr
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new L3n4Addr.
+func (in *L3n4Addr) DeepCopy() *L3n4Addr {
+ if in == nil {
+ return nil
+ }
+ out := new(L3n4Addr)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *L3n4AddrID) DeepCopyInto(out *L3n4AddrID) {
+ *out = *in
+ in.L3n4Addr.DeepCopyInto(&out.L3n4Addr)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new L3n4AddrID.
+func (in *L3n4AddrID) DeepCopy() *L3n4AddrID {
+ if in == nil {
+ return nil
+ }
+ out := new(L3n4AddrID)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *L4Addr) DeepCopyInto(out *L4Addr) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new L4Addr.
+func (in *L4Addr) DeepCopy() *L4Addr {
+ if in == nil {
+ return nil
+ }
+ out := new(L4Addr)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SVC) DeepCopyInto(out *SVC) {
+ *out = *in
+ in.Frontend.DeepCopyInto(&out.Frontend)
+ if in.Backends != nil {
+ in, out := &in.Backends, &out.Backends
+ *out = make([]*Backend, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(Backend)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ }
+ out.Name = in.Name
+ if in.LoadBalancerSourceRanges != nil {
+ in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges
+ *out = make([]*cidr.CIDR, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = (*in).DeepCopy()
+ }
+ }
+ }
+ if in.L7LBFrontendPorts != nil {
+ in, out := &in.L7LBFrontendPorts, &out.L7LBFrontendPorts
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SVC.
+func (in *SVC) DeepCopy() *SVC {
+ if in == nil {
+ return nil
+ }
+ out := new(SVC)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceName) DeepCopyInto(out *ServiceName) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceName.
+func (in *ServiceName) DeepCopy() *ServiceName {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceName)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SvcFlagParam) DeepCopyInto(out *SvcFlagParam) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SvcFlagParam.
+func (in *SvcFlagParam) DeepCopy() *SvcFlagParam {
+ if in == nil {
+ return nil
+ }
+ out := new(SvcFlagParam)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/loadbalancer/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/loadbalancer/zz_generated.deepequal.go
new file mode 100644
index 000000000..7290c0d25
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/loadbalancer/zz_generated.deepequal.go
@@ -0,0 +1,66 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by deepequal-gen. DO NOT EDIT.
+
+package loadbalancer
+
+// deepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *L3n4Addr) deepEqual(other *L3n4Addr) bool {
+ if other == nil {
+ return false
+ }
+
+ if !in.AddrCluster.DeepEqual(&other.AddrCluster) {
+ return false
+ }
+
+ if in.L4Addr != other.L4Addr {
+ return false
+ }
+
+ if in.Scope != other.Scope {
+ return false
+ }
+
+ return true
+}
+
+// deepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *L3n4AddrID) deepEqual(other *L3n4AddrID) bool {
+ if other == nil {
+ return false
+ }
+
+ if !in.L3n4Addr.DeepEqual(&other.L3n4Addr) {
+ return false
+ }
+
+ if in.ID != other.ID {
+ return false
+ }
+
+ return true
+}
+
+// deepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *L4Addr) deepEqual(other *L4Addr) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Protocol != other.Protocol {
+ return false
+ }
+ if in.Port != other.Port {
+ return false
+ }
+
+ return true
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/lock/sortable_mutex.go b/vendor/github.com/cilium/cilium/pkg/lock/sortable_mutex.go
new file mode 100644
index 000000000..3b700bdfb
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/lock/sortable_mutex.go
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package lock
+
+import (
+ "sort"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// sortableMutexSeq is a global sequence counter for the creation of new
+// SortableMutex's with unique sequence numbers.
+var sortableMutexSeq atomic.Uint64
+
+// sortableMutex implements SortableMutex. Not exported as the only way to
+// initialize it is via NewSortableMutex().
+type sortableMutex struct {
+ sync.Mutex
+ seq uint64
+ acquireDuration time.Duration
+}
+
+// Lock acquires the underlying mutex and accumulates the time spent
+// blocking into acquireDuration. acquireDuration is written while the lock
+// is held, so concurrent AcquireDuration() reads are unsynchronized —
+// NOTE(review): callers presumably read it only for diagnostics; confirm.
+func (s *sortableMutex) Lock() {
+ start := time.Now()
+ s.Mutex.Lock()
+ s.acquireDuration += time.Since(start)
+}
+
+// Seq returns the mutex's unique, creation-ordered sequence number.
+func (s *sortableMutex) Seq() uint64 { return s.seq }
+
+// AcquireDuration returns the total time spent waiting in Lock() so far.
+func (s *sortableMutex) AcquireDuration() time.Duration { return s.acquireDuration }
+
+// SortableMutex provides a Mutex that can be globally sorted with other
+// sortable mutexes. This allows deadlock-safe locking of a set of mutexes
+// as it guarantees consistent lock ordering.
+type SortableMutex interface {
+ sync.Locker
+ Seq() uint64
+ AcquireDuration() time.Duration // The amount of time it took to acquire the lock
+}
+
+// SortableMutexes is a set of mutexes that can be locked in a safe order.
+// Once Lock() is called it must not be mutated!
+type SortableMutexes []SortableMutex
+
+// Len implements sort.Interface.
+func (s SortableMutexes) Len() int {
+ return len(s)
+}
+
+// Less implements sort.Interface. Ordering is by creation sequence number,
+// which gives the global, consistent lock order.
+func (s SortableMutexes) Less(i int, j int) bool {
+ return s[i].Seq() < s[j].Seq()
+}
+
+// Swap implements sort.Interface.
+func (s SortableMutexes) Swap(i int, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+// Lock sorts the mutexes, and then locks them in order. If any lock cannot be acquired,
+// this will block while holding the locks with a lower sequence number.
+func (s SortableMutexes) Lock() {
+ sort.Sort(s)
+ for _, mu := range s {
+ mu.Lock()
+ }
+}
+
+// Unlock unlocks the sorted set of mutexes locked by prior call to Lock().
+func (s SortableMutexes) Unlock() {
+ for _, mu := range s {
+ mu.Unlock()
+ }
+}
+
+// Compile-time check that SortableMutexes is sortable.
+var _ sort.Interface = SortableMutexes{}
+
+// NewSortableMutex returns a new SortableMutex with the next globally
+// unique sequence number.
+func NewSortableMutex() SortableMutex {
+ seq := sortableMutexSeq.Add(1)
+ return &sortableMutex{
+ seq: seq,
+ }
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/lock/stoppable_waitgroup.go b/vendor/github.com/cilium/cilium/pkg/lock/stoppable_waitgroup.go
index 219b9c83e..090c351af 100644
--- a/vendor/github.com/cilium/cilium/pkg/lock/stoppable_waitgroup.go
+++ b/vendor/github.com/cilium/cilium/pkg/lock/stoppable_waitgroup.go
@@ -14,7 +14,7 @@ type StoppableWaitGroup struct {
noopAdd chan struct{}
// i is the internal counter which can store tolerate negative values
// as opposed the golang's library WaitGroup.
- i *int64
+ i atomic.Int64
doneOnce, stopOnce sync.Once
}
@@ -24,7 +24,6 @@ func NewStoppableWaitGroup() *StoppableWaitGroup {
return &StoppableWaitGroup{
noopDone: make(chan struct{}),
noopAdd: make(chan struct{}),
- i: func() *int64 { i := int64(0); return &i }(),
doneOnce: sync.Once{},
stopOnce: sync.Once{},
}
@@ -65,7 +64,7 @@ func (l *StoppableWaitGroup) Add() {
select {
case <-l.noopAdd:
default:
- atomic.AddInt64(l.i, 1)
+ l.i.Add(1)
}
}
@@ -80,14 +79,14 @@ func (l *StoppableWaitGroup) Done() {
default:
select {
case <-l.noopAdd:
- a := atomic.AddInt64(l.i, -1)
+ a := l.i.Add(-1)
if a <= 0 {
l.doneOnce.Do(func() {
close(l.noopDone)
})
}
default:
- a := atomic.AddInt64(l.i, -1)
+ a := l.i.Add(-1)
select {
// in case the channel was close while we where in this default
// case we will need to check if 'a' is less than zero and close
diff --git a/vendor/github.com/cilium/cilium/pkg/logging/logfields/logfields.go b/vendor/github.com/cilium/cilium/pkg/logging/logfields/logfields.go
index f0bc6e1e6..b58a2b4ef 100644
--- a/vendor/github.com/cilium/cilium/pkg/logging/logfields/logfields.go
+++ b/vendor/github.com/cilium/cilium/pkg/logging/logfields/logfields.go
@@ -35,9 +35,15 @@ const (
// EventUUID is an event unique identifier
EventUUID = "eventID"
+ // CNIAttachmentID uniquely identifies an endpoint
+ CNIAttachmentID = "cniAttachmentID"
+
// ContainerID is the container identifier
ContainerID = "containerID"
+ // ContainerInterface is the name of the interface in the container namespace
+ ContainerInterface = "containerInterface"
+
// IdentityLabels are the labels relevant for the security identity
IdentityLabels = "identityLabels"
@@ -214,7 +220,7 @@ const (
// NewCIDR is the new subnet/CIDR
NewCIDR = "newCIDR"
- // IPAddrs is a lsit of IP addrs
+ // IPAddrs is a list of IP addrs
IPAddrs = "ipAddrs"
// MTU is the maximum transmission unit of one interface
@@ -332,9 +338,6 @@ const (
// BPFClockSource denotes the internal clock source (ktime vs jiffies)
BPFClockSource = "bpfClockSource"
- // BPFInsnSet denotes the instruction set version
- BPFInsnSet = "bpfInsnSet"
-
// CiliumLocalRedirectPolicyName is the name of a CiliumLocalRedirectPolicy
CiliumLocalRedirectName = "ciliumLocalRedirectPolicyName"
@@ -650,9 +653,6 @@ const (
// WorkQueueSyncBackoff is the backoff time used by workqueues before an attempt to retry sync with k8s-apiserver.
WorkQueueSyncBackOff = "workQueueSyncBackOff"
- // CESSliceMode indicates the name of algorithm used to batch CEPs in a CES.
- CESSliceMode = "ciliumEndpointSliceMode"
-
// SourceIP is a source IP
SourceIP = "sourceIP"
diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/bpf.go b/vendor/github.com/cilium/cilium/pkg/metrics/bpf.go
new file mode 100644
index 000000000..ec5816542
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/metrics/bpf.go
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package metrics
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "os/exec"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/sirupsen/logrus"
+)
+
+// bpfCollector exposes kernel BPF map/program memory usage as Prometheus
+// gauges, sourced from bpftool (see getMemoryUsage).
+type bpfCollector struct {
+ bpfMapsMemory *prometheus.Desc
+ bpfProgMemory *prometheus.Desc
+}
+
+// newbpfCollector builds the collector with its two metric descriptors.
+func newbpfCollector() *bpfCollector {
+ return &bpfCollector{
+ bpfMapsMemory: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, "", "bpf_maps_virtual_memory_max_bytes"),
+ "BPF maps kernel max memory usage size in bytes.",
+ nil, nil,
+ ),
+ bpfProgMemory: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, "", "bpf_progs_virtual_memory_max_bytes"),
+ "BPF programs kernel max memory usage size in bytes.",
+ nil, nil,
+ ),
+ }
+}
+
+// Describe implements prometheus.Collector.
+func (s *bpfCollector) Describe(ch chan<- *prometheus.Desc) {
+ ch <- s.bpfMapsMemory
+ ch <- s.bpfProgMemory
+}
+
+// memoryEntry is the subset of a bpftool JSON entry we care about.
+type memoryEntry struct {
+ BytesMemlock uint64 `json:"bytes_memlock"`
+}
+
+// getMemoryUsage shells out to `bpftool -j <typ> show` (typ is "map" or
+// "prog") and sums bytes_memlock across all entries. The invocation is
+// bounded by a 30s timeout.
+func getMemoryUsage(typ string) (uint64, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+ cmd := exec.CommandContext(ctx, "bpftool", "-j", typ, "show")
+ out, err := cmd.Output()
+ if err != nil {
+ return 0, fmt.Errorf("unable to get bpftool output: %w", err)
+ }
+
+ var memoryEntries []memoryEntry
+ err = json.Unmarshal(out, &memoryEntries)
+ if err != nil {
+ return 0, fmt.Errorf("unable to unmarshal bpftool output: %w", err)
+ }
+ var totalMem uint64
+ for _, entry := range memoryEntries {
+ totalMem += entry.BytesMemlock
+ }
+ return totalMem, nil
+}
+
+// Collect implements prometheus.Collector. Failures to read either figure
+// are logged and that metric is skipped for the scrape, rather than
+// failing the whole collection.
+func (s *bpfCollector) Collect(ch chan<- prometheus.Metric) {
+ mapMem, err := getMemoryUsage("map")
+ if err != nil {
+ logrus.WithError(err).Error("Error while getting BPF maps memory usage")
+ } else {
+ ch <- prometheus.MustNewConstMetric(
+ s.bpfMapsMemory,
+ prometheus.GaugeValue,
+ float64(mapMem),
+ )
+ }
+
+ progMem, err := getMemoryUsage("prog")
+ if err != nil {
+ logrus.WithError(err).Error("Error while getting BPF progs memory usage")
+ } else {
+ ch <- prometheus.MustNewConstMetric(
+ s.bpfProgMemory,
+ prometheus.GaugeValue,
+ float64(progMem),
+ )
+ }
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/cell.go b/vendor/github.com/cilium/cilium/pkg/metrics/cell.go
new file mode 100644
index 000000000..395fe803d
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/metrics/cell.go
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package metrics
+
+import "github.com/cilium/cilium/pkg/hive/cell"
+
+// Cell wires the metrics registry and legacy metrics into the hive.
+var Cell = cell.Module("metrics", "Metrics",
+ // Provide the registry to hive, and also invoke it in case no other
+ // cell depends on it.
+ cell.Provide(NewRegistry),
+ cell.Invoke(func(_ *Registry) {}),
+ cell.Metric(NewLegacyMetrics),
+ cell.Config(defaultRegistryConfig),
+)
diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/interfaces.go b/vendor/github.com/cilium/cilium/pkg/metrics/interfaces.go
new file mode 100644
index 000000000..016f2bc58
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/metrics/interfaces.go
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package metrics
+
+import (
+ "github.com/cilium/cilium/api/v1/client/daemon"
+ "github.com/cilium/cilium/api/v1/health/client/connectivity"
+ metricpkg "github.com/cilium/cilium/pkg/metrics/metric"
+
+ "github.com/prometheus/client_golang/prometheus"
+ dto "github.com/prometheus/client_model/go"
+)
+
+type daemonHealthGetter interface {
+ GetHealthz(params *daemon.GetHealthzParams, opts ...daemon.ClientOption) (*daemon.GetHealthzOK, error)
+}
+
+type connectivityStatusGetter interface {
+ GetStatus(params *connectivity.GetStatusParams, opts ...connectivity.ClientOption) (*connectivity.GetStatusOK, error)
+}
+
+var (
+ NoOpMetric prometheus.Metric = &mockMetric{}
+ NoOpCollector prometheus.Collector = &collector{}
+
+ NoOpCounter metricpkg.Counter = &counter{NoOpMetric, NoOpCollector}
+ NoOpCounterVec metricpkg.Vec[metricpkg.Counter] = &counterVec{NoOpCollector}
+ NoOpObserver metricpkg.Observer = &observer{}
+ NoOpHistogram metricpkg.Histogram = &histogram{NoOpCollector}
+ NoOpObserverVec metricpkg.Vec[metricpkg.Observer] = &observerVec{NoOpCollector}
+ NoOpGauge metricpkg.Gauge = &gauge{NoOpMetric, NoOpCollector}
+ NoOpGaugeVec metricpkg.Vec[metricpkg.Gauge] = &gaugeVec{NoOpCollector}
+ NoOpGaugeDeletableVec metricpkg.DeletableVec[metricpkg.Gauge] = &gaugeDeletableVec{gaugeVec{NoOpCollector}}
+)
+
+// Metric
+
+type mockMetric struct{}
+
+// *WARNING*: Desc returns nil so do not register this metric into prometheus
+// default register.
+func (m *mockMetric) Desc() *prometheus.Desc { return nil }
+func (m *mockMetric) Write(*dto.Metric) error { return nil }
+
+// Collector
+
+type collector struct{}
+
+func (c *collector) Describe(chan<- *prometheus.Desc) {}
+func (c *collector) Collect(chan<- prometheus.Metric) {}
+
+// Counter
+
+type counter struct {
+ prometheus.Metric
+ prometheus.Collector
+}
+
+func (cv *counter) Add(float64) {}
+func (cv *counter) Get() float64 { return 0 }
+func (cv *counter) Inc() {}
+func (cv *counter) IsEnabled() bool { return false }
+func (cv *counter) SetEnabled(bool) {}
+func (cv *counter) Opts() metricpkg.Opts { return metricpkg.Opts{} }
+
+// CounterVec
+
+type counterVec struct{ prometheus.Collector }
+
+func (cv *counterVec) With(prometheus.Labels) metricpkg.Counter { return NoOpGauge }
+func (cv *counterVec) WithLabelValues(...string) metricpkg.Counter { return NoOpGauge }
+
+func (cv *counterVec) CurryWith(prometheus.Labels) (metricpkg.Vec[metricpkg.Counter], error) {
+ return NoOpCounterVec, nil
+}
+func (cv *counterVec) MustCurryWith(prometheus.Labels) metricpkg.Vec[metricpkg.Counter] {
+ return NoOpCounterVec
+}
+func (cv *counterVec) GetMetricWith(prometheus.Labels) (metricpkg.Counter, error) {
+ return NoOpCounter, nil
+}
+func (cv *counterVec) GetMetricWithLabelValues(...string) (metricpkg.Counter, error) {
+ return NoOpCounter, nil
+}
+func (cv *counterVec) IsEnabled() bool { return false }
+func (cv *counterVec) SetEnabled(bool) {}
+func (cv *counterVec) Opts() metricpkg.Opts { return metricpkg.Opts{} }
+
+// Observer
+
+type observer struct{}
+
+func (o *observer) Observe(float64) {}
+func (o *observer) IsEnabled() bool { return false }
+func (o *observer) SetEnabled(bool) {}
+func (o *observer) Opts() metricpkg.Opts { return metricpkg.Opts{} }
+
+// Histogram
+
+type histogram struct {
+ prometheus.Collector
+}
+
+func (h *histogram) Observe(float64) {}
+
+func (h *histogram) Desc() *prometheus.Desc { return nil }
+func (h *histogram) Write(*dto.Metric) error { return nil }
+func (h *histogram) IsEnabled() bool { return false }
+func (h *histogram) SetEnabled(bool) {}
+func (h *histogram) Opts() metricpkg.Opts { return metricpkg.Opts{} }
+
+// ObserverVec
+
+type observerVec struct {
+ prometheus.Collector
+}
+
+func (ov *observerVec) GetMetricWith(prometheus.Labels) (metricpkg.Observer, error) {
+ return NoOpObserver, nil
+}
+func (ov *observerVec) GetMetricWithLabelValues(lvs ...string) (metricpkg.Observer, error) {
+ return NoOpObserver, nil
+}
+
+func (ov *observerVec) With(prometheus.Labels) metricpkg.Observer { return NoOpObserver }
+func (ov *observerVec) WithLabelValues(...string) metricpkg.Observer { return NoOpObserver }
+
+func (ov *observerVec) CurryWith(prometheus.Labels) (metricpkg.Vec[metricpkg.Observer], error) {
+ return NoOpObserverVec, nil
+}
+func (ov *observerVec) MustCurryWith(prometheus.Labels) metricpkg.Vec[metricpkg.Observer] {
+ return NoOpObserverVec
+}
+
+func (ov *observerVec) IsEnabled() bool { return false }
+func (ov *observerVec) SetEnabled(bool) {}
+func (ov *observerVec) Opts() metricpkg.Opts { return metricpkg.Opts{} }
+
+// Gauge
+
+type gauge struct {
+ prometheus.Metric
+ prometheus.Collector
+}
+
+func (g *gauge) Set(float64) {}
+func (g *gauge) Get() float64 { return 0 }
+func (g *gauge) Inc() {}
+func (g *gauge) Dec() {}
+func (g *gauge) Add(float64) {}
+func (g *gauge) Sub(float64) {}
+func (g *gauge) SetToCurrentTime() {}
+func (g *gauge) IsEnabled() bool { return false }
+func (g *gauge) SetEnabled(bool) {}
+func (g *gauge) Opts() metricpkg.Opts { return metricpkg.Opts{} }
+
+// GaugeVec
+
+type gaugeDeletableVec struct {
+ gaugeVec
+}
+
+func (*gaugeDeletableVec) Delete(ll prometheus.Labels) bool {
+ return false
+}
+
+func (*gaugeDeletableVec) DeleteLabelValues(lvs ...string) bool {
+ return false
+}
+
+func (*gaugeDeletableVec) DeletePartialMatch(labels prometheus.Labels) int {
+ return 0
+}
+
+func (*gaugeDeletableVec) Reset() {}
+
+type gaugeVec struct {
+ prometheus.Collector
+}
+
+func (gv *gaugeVec) With(prometheus.Labels) metricpkg.Gauge { return NoOpGauge }
+func (gv *gaugeVec) WithLabelValues(...string) metricpkg.Gauge { return NoOpGauge }
+
+func (gv *gaugeVec) CurryWith(prometheus.Labels) (metricpkg.Vec[metricpkg.Gauge], error) {
+ return NoOpGaugeVec, nil
+}
+func (gv *gaugeVec) MustCurryWith(prometheus.Labels) metricpkg.Vec[metricpkg.Gauge] {
+ return NoOpGaugeVec
+}
+func (gv *gaugeVec) GetMetricWith(prometheus.Labels) (metricpkg.Gauge, error) {
+ return NoOpGauge, nil
+}
+func (gv *gaugeVec) GetMetricWithLabelValues(...string) (metricpkg.Gauge, error) {
+ return NoOpGauge, nil
+}
+func (gv *gaugeVec) IsEnabled() bool { return false }
+func (gv *gaugeVec) SetEnabled(bool) {}
+func (gv *gaugeVec) Opts() metricpkg.Opts { return metricpkg.Opts{} }
diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/logging_hook.go b/vendor/github.com/cilium/cilium/pkg/metrics/logging_hook.go
new file mode 100644
index 000000000..62c368ece
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/metrics/logging_hook.go
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package metrics
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/sirupsen/logrus"
+
+ "github.com/cilium/cilium/pkg/components"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+ "github.com/cilium/cilium/pkg/metrics/metric"
+)
+
+// LoggingHook is a hook for logrus which counts error and warning messages as a
+// Prometheus metric.
+type LoggingHook struct {
+ metric metric.Vec[metric.Counter]
+}
+
+// NewLoggingHook returns a new instance of LoggingHook for the given Cilium
+// component.
+func NewLoggingHook(component string) *LoggingHook {
+ // NOTE(mrostecki): For now errors and warning metric exists only for Cilium
+ // daemon, but support of Prometheus metrics in some other components (i.e.
+ // cilium-health - GH-4268) is planned.
+
+ // Pick a metric for the component.
+ var metric metric.Vec[metric.Counter]
+ switch component {
+ case components.CiliumAgentName:
+ metric = ErrorsWarnings
+ case components.CiliumOperatortName:
+ metric = ErrorsWarnings
+ default:
+ panic(fmt.Sprintf("component %s is unsupported by LoggingHook", component))
+ }
+
+ return &LoggingHook{metric: metric}
+}
+
+// Levels returns the list of logging levels on which the hook is triggered.
+func (h *LoggingHook) Levels() []logrus.Level {
+ return []logrus.Level{
+ logrus.ErrorLevel,
+ logrus.WarnLevel,
+ }
+}
+
+// Fire is the main method which is called every time when logger has an error
+// or warning message.
+func (h *LoggingHook) Fire(entry *logrus.Entry) error {
+ // Get information about subsystem from logging entry field.
+ iSubsystem, ok := entry.Data[logfields.LogSubsys]
+ if !ok {
+ serializedEntry, err := entry.String()
+ if err != nil {
+ return fmt.Errorf("log entry cannot be serialized and doesn't contain 'subsys' field")
+ }
+ return fmt.Errorf("log entry doesn't contain 'subsys' field: %s", serializedEntry)
+ }
+ subsystem, ok := iSubsystem.(string)
+ if !ok {
+ return fmt.Errorf("type of the 'subsystem' log entry field is not string but %s", reflect.TypeOf(iSubsystem))
+ }
+
+ // Increment the metric.
+ h.metric.WithLabelValues(entry.Level.String(), subsystem).Inc()
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/metric/counter.go b/vendor/github.com/cilium/cilium/pkg/metrics/metric/counter.go
new file mode 100644
index 000000000..4755a468d
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/metrics/metric/counter.go
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package metric
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+ dto "github.com/prometheus/client_model/go"
+)
+
+func NewCounter(opts CounterOpts) Counter {
+ return &counter{
+ Counter: prometheus.NewCounter(opts.toPrometheus()),
+ metric: metric{
+ enabled: !opts.Disabled,
+ opts: Opts(opts),
+ },
+ }
+}
+
+type Counter interface {
+ prometheus.Counter
+ WithMetadata
+
+ Get() float64
+}
+
+type counter struct {
+ prometheus.Counter
+ metric
+}
+
+func (c *counter) Collect(metricChan chan<- prometheus.Metric) {
+ if c.enabled {
+ c.Counter.Collect(metricChan)
+ }
+}
+
+func (c *counter) Get() float64 {
+ var pm dto.Metric
+ err := c.Counter.Write(&pm)
+ if err == nil {
+ return *pm.Counter.Value
+ }
+ return 0
+}
+
+// Inc increments the counter by 1. Use Add to increment it by arbitrary
+// non-negative values.
+func (c *counter) Inc() {
+ if c.enabled {
+ c.Counter.Inc()
+ }
+}
+
+// Add adds the given value to the counter. It panics if the value is < 0.
+func (c *counter) Add(val float64) {
+ if c.enabled {
+ c.Counter.Add(val)
+ }
+}
+
+func NewCounterVec(opts CounterOpts, labelNames []string) DeletableVec[Counter] {
+ return &counterVec{
+ CounterVec: prometheus.NewCounterVec(opts.toPrometheus(), labelNames),
+ metric: metric{
+ enabled: !opts.Disabled,
+ opts: Opts(opts),
+ },
+ }
+}
+
+type counterVec struct {
+ *prometheus.CounterVec
+ metric
+}
+
+func (cv *counterVec) CurryWith(labels prometheus.Labels) (Vec[Counter], error) {
+ vec, err := cv.CounterVec.CurryWith(labels)
+ if err == nil {
+ return &counterVec{CounterVec: vec, metric: cv.metric}, nil
+ }
+ return nil, err
+}
+
+func (cv *counterVec) GetMetricWith(labels prometheus.Labels) (Counter, error) {
+ if !cv.enabled {
+ return &counter{
+ metric: metric{enabled: false},
+ }, nil
+ }
+
+ promCounter, err := cv.CounterVec.GetMetricWith(labels)
+ if err == nil {
+ return &counter{
+ Counter: promCounter,
+ metric: cv.metric,
+ }, nil
+ }
+ return nil, err
+}
+
+func (cv *counterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
+ if !cv.enabled {
+ return &counter{
+ metric: metric{enabled: false},
+ }, nil
+ }
+
+ promCounter, err := cv.CounterVec.GetMetricWithLabelValues(lvs...)
+ if err == nil {
+ return &counter{
+ Counter: promCounter,
+ metric: cv.metric,
+ }, nil
+ }
+ return nil, err
+}
+
+func (cv *counterVec) With(labels prometheus.Labels) Counter {
+ if !cv.enabled {
+ return &counter{
+ metric: metric{enabled: false},
+ }
+ }
+
+ promCounter := cv.CounterVec.With(labels)
+ return &counter{
+ Counter: promCounter,
+ metric: cv.metric,
+ }
+}
+
+func (cv *counterVec) WithLabelValues(lvs ...string) Counter {
+ if !cv.enabled {
+ return &counter{
+ metric: metric{enabled: false},
+ }
+ }
+
+ promCounter := cv.CounterVec.WithLabelValues(lvs...)
+ return &counter{
+ Counter: promCounter,
+ metric: cv.metric,
+ }
+}
+
+func (cv *counterVec) SetEnabled(e bool) {
+ if !e {
+ cv.Reset()
+ }
+
+ cv.metric.SetEnabled(e)
+}
+
+type CounterOpts Opts
+
+func (co CounterOpts) toPrometheus() prometheus.CounterOpts {
+ return prometheus.CounterOpts{
+ Name: co.Name,
+ Namespace: co.Namespace,
+ Subsystem: co.Subsystem,
+ Help: co.Help,
+ ConstLabels: co.ConstLabels,
+ }
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/metric/gauge.go b/vendor/github.com/cilium/cilium/pkg/metrics/metric/gauge.go
new file mode 100644
index 000000000..445afde06
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/metrics/metric/gauge.go
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package metric
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+ dto "github.com/prometheus/client_model/go"
+)
+
+func NewGauge(opts GaugeOpts) Gauge {
+ return &gauge{
+ Gauge: prometheus.NewGauge(opts.toPrometheus()),
+ metric: metric{
+ enabled: !opts.Disabled,
+ opts: Opts(opts),
+ },
+ }
+}
+
+type Gauge interface {
+ prometheus.Gauge
+ WithMetadata
+
+ Get() float64
+}
+
+type gauge struct {
+ prometheus.Gauge
+ metric
+}
+
+func (g *gauge) Collect(metricChan chan<- prometheus.Metric) {
+ if g.enabled {
+ g.Gauge.Collect(metricChan)
+ }
+}
+
+func (g *gauge) Get() float64 {
+ if !g.enabled {
+ return 0
+ }
+
+ var pm dto.Metric
+ err := g.Gauge.Write(&pm)
+ if err == nil {
+ return *pm.Gauge.Value
+ }
+ return 0
+}
+
+// Set sets the Gauge to an arbitrary value.
+func (g *gauge) Set(val float64) {
+ if g.enabled {
+ g.Gauge.Set(val)
+ }
+}
+
+// Inc increments the Gauge by 1. Use Add to increment it by arbitrary
+// values.
+func (g *gauge) Inc() {
+ if g.enabled {
+ g.Gauge.Inc()
+ }
+}
+
+// Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary
+// values.
+func (g *gauge) Dec() {
+ if g.enabled {
+ g.Gauge.Dec()
+ }
+}
+
+// Add adds the given value to the Gauge. (The value can be negative,
+// resulting in a decrease of the Gauge.)
+func (g *gauge) Add(val float64) {
+ if g.enabled {
+ g.Gauge.Add(val)
+ }
+}
+
+// Sub subtracts the given value from the Gauge. (The value can be
+// negative, resulting in an increase of the Gauge.)
+func (g *gauge) Sub(i float64) {
+ if g.enabled {
+ g.Gauge.Sub(i)
+ }
+}
+
+// SetToCurrentTime sets the Gauge to the current Unix time in seconds.
+func (g *gauge) SetToCurrentTime() {
+ if g.enabled {
+ g.Gauge.SetToCurrentTime()
+ }
+}
+
+func NewGaugeVec(opts GaugeOpts, labelNames []string) DeletableVec[Gauge] {
+ return &gaugeVec{
+ GaugeVec: prometheus.NewGaugeVec(opts.toPrometheus(), labelNames),
+ metric: metric{
+ enabled: !opts.Disabled,
+ opts: Opts(opts),
+ },
+ }
+}
+
+type gaugeVec struct {
+ *prometheus.GaugeVec
+ metric
+}
+
+func (gv *gaugeVec) CurryWith(labels prometheus.Labels) (Vec[Gauge], error) {
+ vec, err := gv.GaugeVec.CurryWith(labels)
+ if err == nil {
+ return &gaugeVec{GaugeVec: vec, metric: gv.metric}, nil
+ }
+ return nil, err
+}
+
+func (gv *gaugeVec) GetMetricWith(labels prometheus.Labels) (Gauge, error) {
+ if !gv.enabled {
+ return &gauge{
+ metric: metric{enabled: false},
+ }, nil
+ }
+
+ promGauge, err := gv.GaugeVec.GetMetricWith(labels)
+ if err == nil {
+ return &gauge{
+ Gauge: promGauge,
+ metric: gv.metric,
+ }, nil
+ }
+ return nil, err
+}
+
+func (gv *gaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
+ if !gv.enabled {
+ return &gauge{
+ metric: metric{enabled: false},
+ }, nil
+ }
+
+ promGauge, err := gv.GaugeVec.GetMetricWithLabelValues(lvs...)
+ if err == nil {
+ return &gauge{
+ Gauge: promGauge,
+ metric: gv.metric,
+ }, nil
+ }
+ return nil, err
+}
+
+func (gv *gaugeVec) With(labels prometheus.Labels) Gauge {
+ if !gv.enabled {
+ return &gauge{
+ metric: metric{enabled: false},
+ }
+ }
+
+ promGauge := gv.GaugeVec.With(labels)
+ return &gauge{
+ Gauge: promGauge,
+ metric: gv.metric,
+ }
+}
+
+func (gv *gaugeVec) WithLabelValues(lvs ...string) Gauge {
+ if !gv.enabled {
+ return &gauge{
+ metric: metric{enabled: false},
+ }
+ }
+
+ promGauge := gv.GaugeVec.WithLabelValues(lvs...)
+ return &gauge{
+ Gauge: promGauge,
+ metric: gv.metric,
+ }
+}
+
+func (gv *gaugeVec) SetEnabled(e bool) {
+ if !e {
+ gv.Reset()
+ }
+
+ gv.metric.SetEnabled(e)
+}
+
+type GaugeFunc interface {
+ prometheus.GaugeFunc
+ WithMetadata
+}
+
+func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
+ return &gaugeFunc{
+ GaugeFunc: prometheus.NewGaugeFunc(opts.toPrometheus(), function),
+ metric: metric{
+ enabled: !opts.Disabled,
+ opts: Opts(opts),
+ },
+ }
+}
+
+type gaugeFunc struct {
+ prometheus.GaugeFunc
+ metric
+}
+
+func (gf *gaugeFunc) Collect(metricChan chan<- prometheus.Metric) {
+ if gf.enabled {
+ gf.GaugeFunc.Collect(metricChan)
+ }
+}
+
+type GaugeOpts Opts
+
+func (o GaugeOpts) toPrometheus() prometheus.GaugeOpts {
+ return prometheus.GaugeOpts{
+ Namespace: o.Namespace,
+ Subsystem: o.Subsystem,
+ Name: o.Name,
+ Help: o.Help,
+ ConstLabels: o.ConstLabels,
+ }
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/metric/histogram.go b/vendor/github.com/cilium/cilium/pkg/metrics/metric/histogram.go
new file mode 100644
index 000000000..f1ddb526a
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/metrics/metric/histogram.go
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package metric
+
+import (
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+func NewHistogram(opts HistogramOpts) Histogram {
+ return &histogram{
+ Histogram: prometheus.NewHistogram(opts.toPrometheus()),
+ metric: metric{
+ enabled: !opts.Disabled,
+ opts: opts.opts(),
+ },
+ }
+}
+
+type Histogram interface {
+ prometheus.Histogram
+ WithMetadata
+}
+
+type histogram struct {
+ prometheus.Histogram
+ metric
+}
+
+func (h *histogram) Collect(metricChan chan<- prometheus.Metric) {
+ if h.enabled {
+ h.Histogram.Collect(metricChan)
+ }
+}
+
+// Observe adds a single observation to the histogram. Observations are
+// usually positive or zero. Negative observations are accepted but
+// prevent current versions of Prometheus from properly detecting
+// counter resets in the sum of observations. (The experimental Native
+// Histograms handle negative observations properly.) See
+// https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations
+// for details.
+func (h *histogram) Observe(val float64) {
+ if h.enabled {
+ h.Histogram.Observe(val)
+ }
+}
+
+type Observer interface {
+ prometheus.Observer
+ WithMetadata
+}
+
+type observer struct {
+ prometheus.Observer
+ metric
+}
+
+// Observe adds a single observation to the histogram. Observations are
+// usually positive or zero. Negative observations are accepted but
+// prevent current versions of Prometheus from properly detecting
+// counter resets in the sum of observations. (The experimental Native
+// Histograms handle negative observations properly.) See
+// https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations
+// for details.
+func (o *observer) Observe(val float64) {
+ if o.enabled {
+ o.Observer.Observe(val)
+ }
+}
+
+func NewHistogramVec(opts HistogramOpts, labelNames []string) Vec[Observer] {
+ return &histogramVec{
+ ObserverVec: prometheus.NewHistogramVec(opts.toPrometheus(), labelNames),
+ metric: metric{
+ enabled: !opts.Disabled,
+ opts: opts.opts(),
+ },
+ }
+}
+
+type histogramVec struct {
+ prometheus.ObserverVec
+ metric
+}
+
+func (cv *histogramVec) CurryWith(labels prometheus.Labels) (Vec[Observer], error) {
+ vec, err := cv.ObserverVec.CurryWith(labels)
+ if err == nil {
+ return &histogramVec{ObserverVec: vec, metric: cv.metric}, nil
+ }
+ return nil, err
+}
+
+func (cv *histogramVec) GetMetricWith(labels prometheus.Labels) (Observer, error) {
+ if !cv.enabled {
+ return &observer{
+ metric: metric{enabled: false},
+ }, nil
+ }
+
+ promObserver, err := cv.ObserverVec.GetMetricWith(labels)
+ if err == nil {
+ return &observer{
+ Observer: promObserver,
+ metric: cv.metric,
+ }, nil
+ }
+ return nil, err
+}
+
+func (cv *histogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
+ if !cv.enabled {
+ return &observer{
+ metric: metric{enabled: false},
+ }, nil
+ }
+
+ promObserver, err := cv.ObserverVec.GetMetricWithLabelValues(lvs...)
+ if err == nil {
+ return &observer{
+ Observer: promObserver,
+ metric: cv.metric,
+ }, nil
+ }
+ return nil, err
+}
+
+func (cv *histogramVec) With(labels prometheus.Labels) Observer {
+ if !cv.enabled {
+ return &observer{
+ metric: metric{enabled: false},
+ }
+ }
+
+ promObserver := cv.ObserverVec.With(labels)
+ return &observer{
+ Observer: promObserver,
+ metric: cv.metric,
+ }
+}
+
+func (cv *histogramVec) WithLabelValues(lvs ...string) Observer {
+ if !cv.enabled {
+ return &observer{
+ metric: metric{enabled: false},
+ }
+ }
+
+ promObserver := cv.ObserverVec.WithLabelValues(lvs...)
+ return &observer{
+ Observer: promObserver,
+ metric: cv.metric,
+ }
+}
+
+func (cv *histogramVec) SetEnabled(e bool) {
+ if !e {
+ if histVec, ok := cv.ObserverVec.(*prometheus.HistogramVec); ok {
+ histVec.Reset()
+ }
+ }
+
+ cv.metric.SetEnabled(e)
+}
+
+// HistogramOpts are a modified and expanded version of the prometheus.HistogramOpts.
+// https://pkg.go.dev/github.com/prometheus/client_golang/prometheus#HistogramOpts
+type HistogramOpts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Histogram (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the Histogram must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this Histogram.
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this metric. Metrics
+ // with the same fully-qualified name must have the same label names in
+ // their ConstLabels.
+ //
+ // ConstLabels are only used rarely. In particular, do not use them to
+ // attach the same labels to all your metrics. Those use cases are
+ // better covered by target labels set by the scraping Prometheus
+ // server, or by one specific metric (e.g. a build_info or a
+ // machine_role metric). See also
+ // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
+ ConstLabels prometheus.Labels
+
+ // Buckets defines the buckets into which observations are counted. Each
+ // element in the slice is the upper inclusive bound of a bucket. The
+ // values must be sorted in strictly increasing order. There is no need
+ // to add a highest bucket with +Inf bound, it will be added
+ // implicitly. If Buckets is left as nil or set to a slice of length
+ // zero, it is replaced by default buckets. The default buckets are
+ // DefBuckets if no buckets for a native histogram (see below) are used,
+ // otherwise the default is no buckets. (In other words, if you want to
+ // use both reguler buckets and buckets for a native histogram, you have
+ // to define the regular buckets here explicitly.)
+ Buckets []float64
+
+ // If NativeHistogramBucketFactor is greater than one, so-called sparse
+ // buckets are used (in addition to the regular buckets, if defined
+ // above). A Histogram with sparse buckets will be ingested as a Native
+ // Histogram by a Prometheus server with that feature enabled (requires
+ // Prometheus v2.40+). Sparse buckets are exponential buckets covering
+ // the whole float64 range (with the exception of the “zero” bucket, see
+ // SparseBucketsZeroThreshold below). From any one bucket to the next,
+ // the width of the bucket grows by a constant
+ // factor. NativeHistogramBucketFactor provides an upper bound for this
+ // factor (exception see below). The smaller
+ // NativeHistogramBucketFactor, the more buckets will be used and thus
+ // the more costly the histogram will become. A generally good trade-off
+ // between cost and accuracy is a value of 1.1 (each bucket is at most
+ // 10% wider than the previous one), which will result in each power of
+ // two divided into 8 buckets (e.g. there will be 8 buckets between 1
+ // and 2, same as between 2 and 4, and 4 and 8, etc.).
+ //
+ // Details about the actually used factor: The factor is calculated as
+ // 2^(2^n), where n is an integer number between (and including) -8 and
+ // 4. n is chosen so that the resulting factor is the largest that is
+ // still smaller or equal to NativeHistogramBucketFactor. Note that the
+ // smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8)
+ // ). If NativeHistogramBucketFactor is greater than 1 but smaller than
+ // 2^(2^-8), then the actually used factor is still 2^(2^-8) even though
+ // it is larger than the provided NativeHistogramBucketFactor.
+ //
+ // NOTE: Native Histograms are still an experimental feature. Their
+ // behavior might still change without a major version
+ // bump. Subsequently, all NativeHistogram... options here might still
+ // change their behavior or name (or might completely disappear) without
+ // a major version bump.
+ NativeHistogramBucketFactor float64
+ // All observations with an absolute value of less or equal
+ // NativeHistogramZeroThreshold are accumulated into a “zero”
+ // bucket. For best results, this should be close to a bucket
+ // boundary. This is usually the case if picking a power of two. If
+ // NativeHistogramZeroThreshold is left at zero,
+ // DefSparseBucketsZeroThreshold is used as the threshold. To configure
+ // a zero bucket with an actual threshold of zero (i.e. only
+ // observations of precisely zero will go into the zero bucket), set
+ // NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero
+ // constant (or any negative float value).
+ NativeHistogramZeroThreshold float64
+
+ // The remaining fields define a strategy to limit the number of
+ // populated sparse buckets. If NativeHistogramMaxBucketNumber is left
+ // at zero, the number of buckets is not limited. (Note that this might
+ // lead to unbounded memory consumption if the values observed by the
+ // Histogram are sufficiently wide-spread. In particular, this could be
+ // used as a DoS attack vector. Where the observed values depend on
+ // external inputs, it is highly recommended to set a
+ // NativeHistogramMaxBucketNumber.) Once the set
+ // NativeHistogramMaxBucketNumber is exceeded, the following strategy is
+ // enacted: First, if the last reset (or the creation) of the histogram
+ // is at least NativeHistogramMinResetDuration ago, then the whole
+ // histogram is reset to its initial state (including regular
+ // buckets). If less time has passed, or if
+ // NativeHistogramMinResetDuration is zero, no reset is
+ // performed. Instead, the zero threshold is increased sufficiently to
+ // reduce the number of buckets to or below
+ // NativeHistogramMaxBucketNumber, but not to more than
+ // NativeHistogramMaxZeroThreshold. Thus, if
+ // NativeHistogramMaxZeroThreshold is already at or below the current
+ // zero threshold, nothing happens at this step. After that, if the
+ // number of buckets still exceeds NativeHistogramMaxBucketNumber, the
+ // resolution of the histogram is reduced by doubling the width of the
+ // sparse buckets (up to a growth factor between one bucket to the next
+ // of 2^(2^4) = 65536, see above).
+ NativeHistogramMaxBucketNumber uint32
+ NativeHistogramMinResetDuration time.Duration
+ NativeHistogramMaxZeroThreshold float64
+
+ ConfigName string
+
+ // If true, the metric has to be explicitly enabled via config or flags
+ Disabled bool
+}
+
+func (ho HistogramOpts) opts() Opts {
+ return Opts{
+ Namespace: ho.Namespace,
+ Subsystem: ho.Subsystem,
+ Name: ho.Name,
+ Help: ho.Help,
+ ConstLabels: ho.ConstLabels,
+ ConfigName: ho.ConfigName,
+ Disabled: ho.Disabled,
+ }
+}
+
+func (ho HistogramOpts) toPrometheus() prometheus.HistogramOpts {
+ return prometheus.HistogramOpts{
+ Namespace: ho.Namespace,
+ Subsystem: ho.Subsystem,
+ Name: ho.Name,
+ Help: ho.Help,
+ ConstLabels: ho.ConstLabels,
+ Buckets: ho.Buckets,
+ NativeHistogramBucketFactor: ho.NativeHistogramBucketFactor,
+ NativeHistogramZeroThreshold: ho.NativeHistogramZeroThreshold,
+ NativeHistogramMaxBucketNumber: ho.NativeHistogramMaxBucketNumber,
+ NativeHistogramMinResetDuration: ho.NativeHistogramMinResetDuration,
+ NativeHistogramMaxZeroThreshold: ho.NativeHistogramMaxZeroThreshold,
+ }
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/metric/metric.go b/vendor/github.com/cilium/cilium/pkg/metrics/metric/metric.go
new file mode 100644
index 000000000..6ad520e0c
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/metrics/metric/metric.go
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package metric
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// WithMetadata is the interface implemented by any metric defined in this package. These typically embed existing
+// prometheus metric types and add additional metadata. In addition, these metrics have the concept of being enabled
+// or disabled which is used in place of conditional registration so all metric types can always be registered.
+type WithMetadata interface {
+ IsEnabled() bool
+ SetEnabled(bool)
+ Opts() Opts
+}
+
+// metric is a "base" structure which can be embedded to provide common functionality.
+type metric struct {
+ enabled bool
+ opts Opts
+}
+
+func (b *metric) IsEnabled() bool {
+ return b.enabled
+}
+
+func (b *metric) SetEnabled(e bool) {
+ b.enabled = e
+}
+
+func (b *metric) Opts() Opts {
+ return b.opts
+}
+
+// Vec is a generic type to describe the vectorized version of another metric type, for example Vec[Counter] would be
+// our version of a prometheus.CounterVec.
+type Vec[T any] interface {
+ prometheus.Collector
+ WithMetadata
+
+ // CurryWith returns a vector curried with the provided labels, i.e. the
+ // returned vector has those labels pre-set for all labeled operations performed
+ // on it. The cardinality of the curried vector is reduced accordingly. The
+ // order of the remaining labels stays the same (just with the curried labels
+ // taken out of the sequence – which is relevant for the
+ // (GetMetric)WithLabelValues methods). It is possible to curry a curried
+ // vector, but only with labels not yet used for currying before.
+ //
+ // The metrics contained in the `Vec[T]` are shared between the curried and
+ // uncurried vectors. They are just accessed differently. Curried and uncurried
+ // vectors behave identically in terms of collection. Only one must be
+ // registered with a given registry (usually the uncurried version). The Reset
+ // method deletes all metrics, even if called on a curried vector.
+ CurryWith(labels prometheus.Labels) (Vec[T], error)
+
+ // GetMetricWith returns the `T` for the given Labels map (the label names
+ // must match those of the variable labels in Desc). If that label map is
+ // accessed for the first time, a new `T` is created. Implications of
+ // creating a `T` without using it and keeping the `T` for later use are
+ // the same as for GetMetricWithLabelValues.
+ //
+ // An error is returned if the number and names of the Labels are inconsistent
+ // with those of the variable labels in Desc (minus any curried labels).
+ //
+ // This method is used for the same purpose as
+ // GetMetricWithLabelValues(...string). See there for pros and cons of the two
+ // methods.
+ GetMetricWith(labels prometheus.Labels) (T, error)
+
+ // GetMetricWithLabelValues returns the `T` for the given slice of label
+ // values (same order as the variable labels in Desc). If that combination of
+ // label values is accessed for the first time, a new `T` is created.
+ //
+ // It is possible to call this method without using the returned `T` to only
+ // create the new `T` but leave it at its starting value 0.
+ //
+ // Keeping the `T` for later use is possible (and should be considered if
+ // performance is critical), but keep in mind that Reset, DeleteLabelValues and
+ // Delete can be used to delete the `T` from the `Vec[T]`, assuming it also
+ // implements `DeletableVec[T]`. In that case,
+ // the `T` will still exist, but it will not be exported anymore, even if a
+ // `T` with the same label values is created later.
+ //
+ // An error is returned if the number of label values is not the same as the
+ // number of variable labels in Desc (minus any curried labels).
+ //
+ // Note that for more than one label value, this method is prone to mistakes
+ // caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+ // an alternative to avoid that type of mistake. For higher label numbers, the
+ // latter has a much more readable (albeit more verbose) syntax, but it comes
+ // with a performance overhead (for creating and processing the Labels map).
+ GetMetricWithLabelValues(lvs ...string) (T, error)
+
+ // With works as GetMetricWith, but panics where GetMetricWithLabels would have
+ // returned an error. Not returning an error allows shortcuts like
+ //
+ // myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+ With(labels prometheus.Labels) T
+
+ // WithLabelValues works as GetMetricWithLabelValues, but panics where
+ // GetMetricWithLabelValues would have returned an error. Not returning an
+ // error allows shortcuts like
+ //
+ // myVec.WithLabelValues("404", "GET").Add(42)
+ WithLabelValues(lvs ...string) T
+}
+
+// DeletableVec is a generic type to describe a vectorized version of another metric type, like Vec[T], but with the
+// additional ability to remove labels without re-creating the metric.
+type DeletableVec[T any] interface {
+ Vec[T]
+
+ // Delete deletes the metric where the variable labels are the same as those
+ // passed in as labels. It returns true if a metric was deleted.
+ //
+ // It is not an error if the number and names of the Labels are inconsistent
+ // with those of the VariableLabels in Desc. However, such inconsistent Labels
+ // can never match an actual metric, so the method will always return false in
+ // that case.
+ //
+ // This method is used for the same purpose as DeleteLabelValues(...string). See
+ // there for pros and cons of the two methods.
+ Delete(labels prometheus.Labels) bool
+
+ // DeleteLabelValues removes the metric where the variable labels are the same
+ // as those passed in as labels (same order as the VariableLabels in Desc). It
+ // returns true if a metric was deleted.
+ //
+ // It is not an error if the number of label values is not the same as the
+ // number of VariableLabels in Desc. However, such inconsistent label count can
+ // never match an actual metric, so the method will always return false in that
+ // case.
+ //
+ // Note that for more than one label value, this method is prone to mistakes
+ // caused by an incorrect order of arguments. Consider Delete(Labels) as an
+ // alternative to avoid that type of mistake. For higher label numbers, the
+ // latter has a much more readable (albeit more verbose) syntax, but it comes
+ // with a performance overhead (for creating and processing the Labels map).
+ // See also the CounterVec example.
+ DeleteLabelValues(lvs ...string) bool
+
+ // DeletePartialMatch deletes all metrics where the variable labels contain all of those
+ // passed in as labels. The order of the labels does not matter.
+ // It returns the number of metrics deleted.
+ //
+ // Note that curried labels will never be matched if deleting from the curried vector.
+ // To match curried labels with DeletePartialMatch, it must be called on the base vector.
+ DeletePartialMatch(labels prometheus.Labels) int
+
+ // Reset deletes all metrics in this vector.
+ Reset()
+}
+
+// Opts are a modified and extended version of the prometheus.Opts
+// https://pkg.go.dev/github.com/prometheus/client_golang/prometheus#Opts
+type Opts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Metric (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the metric must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this metric.
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this metric. Metrics
+ // with the same fully-qualified name must have the same label names in
+ // their ConstLabels.
+ //
+ // ConstLabels are only used rarely. In particular, do not use them to
+ // attach the same labels to all your metrics. Those use cases are
+ // better covered by target labels set by the scraping Prometheus
+ // server, or by one specific metric (e.g. a build_info or a
+ // machine_role metric). See also
+ // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
+ ConstLabels prometheus.Labels
+
+ // The name used to enable/disable this metric via the config/flags
+ ConfigName string
+
+ // If true, the metric has to be explicitly enabled via config or flags
+ Disabled bool
+}
+
+func (b Opts) GetConfigName() string {
+ if b.ConfigName == "" {
+ return prometheus.BuildFQName(b.Namespace, b.Subsystem, b.Name)
+ }
+ return b.ConfigName
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/metrics.go b/vendor/github.com/cilium/cilium/pkg/metrics/metrics.go
new file mode 100644
index 000000000..4e7ef5821
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/metrics/metrics.go
@@ -0,0 +1,1602 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Package metrics holds prometheus metrics objects and related utility functions. It
+// does not abstract away the prometheus client but the caller rarely needs to
+// refer to prometheus directly.
+package metrics
+
+// Adding a metric
+// - Add a metric object of the appropriate type as an exported variable
+// - Register the new object in the init function
+
+import (
+ "context"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+ dto "github.com/prometheus/client_model/go"
+ "github.com/sirupsen/logrus"
+
+ "github.com/cilium/cilium/api/v1/models"
+ "github.com/cilium/cilium/pkg/metrics/metric"
+ "github.com/cilium/cilium/pkg/promise"
+ "github.com/cilium/cilium/pkg/version"
+)
+
+const (
+ // ErrorTimeout is the value used to notify timeout errors.
+ ErrorTimeout = "timeout"
+
+ // ErrorProxy is the value used to notify errors on Proxy.
+ ErrorProxy = "proxy"
+
+ // L7DNS is the value used to report DNS label on metrics
+ L7DNS = "dns"
+
+ // SubsystemBPF is the subsystem to scope metrics related to the bpf syscalls.
+ SubsystemBPF = "bpf"
+
+ // SubsystemDatapath is the subsystem to scope metrics related to management of
+ // the datapath. It is prepended to metric names and separated with a '_'.
+ SubsystemDatapath = "datapath"
+
+ // SubsystemAgent is the subsystem to scope metrics related to the cilium agent itself.
+ SubsystemAgent = "agent"
+
+	// SubsystemIPCache is the subsystem to scope metrics related to the ipcache.
+ SubsystemIPCache = "ipcache"
+
+ // SubsystemK8s is the subsystem to scope metrics related to Kubernetes
+ SubsystemK8s = "k8s"
+
+ // SubsystemK8sClient is the subsystem to scope metrics related to the kubernetes client.
+ SubsystemK8sClient = "k8s_client"
+
+ // SubsystemWorkQueue is the subsystem to scope metrics related to the workqueue.
+ SubsystemWorkQueue = "k8s_workqueue"
+
+ // SubsystemKVStore is the subsystem to scope metrics related to the kvstore.
+ SubsystemKVStore = "kvstore"
+
+ // SubsystemFQDN is the subsystem to scope metrics related to the FQDN proxy.
+ SubsystemFQDN = "fqdn"
+
+ // SubsystemNodes is the subsystem to scope metrics related to the node manager.
+ SubsystemNodes = "nodes"
+
+ // SubsystemTriggers is the subsystem to scope metrics related to the trigger package.
+ SubsystemTriggers = "triggers"
+
+ // SubsystemAPILimiter is the subsystem to scope metrics related to the API limiter package.
+ SubsystemAPILimiter = "api_limiter"
+
+ // CiliumAgentNamespace is used to scope metrics from the Cilium Agent
+ CiliumAgentNamespace = "cilium"
+
+ // CiliumClusterMeshAPIServerNamespace is used to scope metrics from the
+ // Cilium Cluster Mesh API Server
+ CiliumClusterMeshAPIServerNamespace = "cilium_clustermesh_apiserver"
+
+	// CiliumKVStoreMeshNamespace is used to scope metrics from
+ // Cilium KVStoreMesh
+ CiliumKVStoreMeshNamespace = "cilium_kvstoremesh"
+
+ // CiliumOperatorNamespace is used to scope metrics from the Cilium Operator
+ CiliumOperatorNamespace = "cilium_operator"
+
+ // LabelError indicates the type of error (string)
+ LabelError = "error"
+
+ // LabelOutcome indicates whether the outcome of the operation was successful or not
+ LabelOutcome = "outcome"
+
+ // LabelAttempts is the number of attempts it took to complete the operation
+ LabelAttempts = "attempts"
+
+ // Labels
+
+ // LabelValueOutcomeSuccess is used as a successful outcome of an operation
+ LabelValueOutcomeSuccess = "success"
+
+ // LabelValueOutcomeFail is used as an unsuccessful outcome of an operation
+ LabelValueOutcomeFail = "fail"
+
+ // LabelEventSourceAPI marks event-related metrics that come from the API
+ LabelEventSourceAPI = "api"
+
+ // LabelEventSourceK8s marks event-related metrics that come from k8s
+ LabelEventSourceK8s = "k8s"
+
+ // LabelEventSourceFQDN marks event-related metrics that come from pkg/fqdn
+ LabelEventSourceFQDN = "fqdn"
+
+ // LabelEventSourceContainerd marks event-related metrics that come from docker
+ LabelEventSourceContainerd = "docker"
+
+ // LabelDatapathArea marks which area the metrics are related to (eg, which BPF map)
+ LabelDatapathArea = "area"
+
+ // LabelDatapathName marks a unique identifier for this metric.
+ // The name should be defined once for a given type of error.
+ LabelDatapathName = "name"
+
+ // LabelDatapathFamily marks which protocol family (IPv4, IPV6) the metric is related to.
+ LabelDatapathFamily = "family"
+
+ // LabelProtocol marks the L4 protocol (TCP, ANY) for the metric.
+ LabelProtocol = "protocol"
+
+ // LabelSignalType marks the signal name
+ LabelSignalType = "signal"
+
+ // LabelSignalData marks the signal data
+ LabelSignalData = "data"
+
+ // LabelStatus the label from completed task
+ LabelStatus = "status"
+
+ // LabelPolicyEnforcement is the label used to see the enforcement status
+ LabelPolicyEnforcement = "enforcement"
+
+	// LabelPolicySource is the label used to identify the source of the policy
+ LabelPolicySource = "source"
+
+ // LabelScope is the label used to defined multiples scopes in the same
+ // metric. For example, one counter may measure a metric over the scope of
+ // the entire event (scope=global), or just part of an event
+ // (scope=slow_path)
+ LabelScope = "scope"
+
+ // LabelProtocolL7 is the label used when working with layer 7 protocols.
+ LabelProtocolL7 = "protocol_l7"
+
+ // LabelBuildState is the state a build queue entry is in
+ LabelBuildState = "state"
+
+ // LabelBuildQueueName is the name of the build queue
+ LabelBuildQueueName = "name"
+
+ // LabelAction is the label used to defined what kind of action was performed in a metric
+ LabelAction = "action"
+
+ // LabelSubsystem is the label used to refer to any of the child process
+ // started by cilium (Envoy, monitor, etc..)
+ LabelSubsystem = "subsystem"
+
+ // LabelKind is the kind of a label
+ LabelKind = "kind"
+
+ // LabelEventSource is the source of a label for event metrics
+ // i.e. k8s, containerd, api.
+ LabelEventSource = "source"
+
+ // LabelPath is the label for the API path
+ LabelPath = "path"
+ // LabelMethod is the label for the HTTP method
+ LabelMethod = "method"
+
+ // LabelAPIReturnCode is the HTTP code returned for that API path
+ LabelAPIReturnCode = "return_code"
+
+ // LabelOperation is the label for BPF maps operations
+ LabelOperation = "operation"
+
+ // LabelMapName is the label for the BPF map name
+ LabelMapName = "map_name"
+
+ LabelMapGroup = "map_group"
+
+ // LabelVersion is the label for the version number
+ LabelVersion = "version"
+
+ // LabelVersionRevision is the label for the version revision
+ LabelVersionRevision = "revision"
+
+ // LabelArch is the label for the platform architecture (e.g. linux/amd64)
+ LabelArch = "arch"
+
+ // LabelDirection is the label for traffic direction
+ LabelDirection = "direction"
+
+ // LabelSourceCluster is the label for source cluster name
+ LabelSourceCluster = "source_cluster"
+
+ // LabelSourceNodeName is the label for source node name
+ LabelSourceNodeName = "source_node_name"
+
+ // LabelTargetCluster is the label for target cluster name
+ LabelTargetCluster = "target_cluster"
+
+ // LabelTargetNodeIP is the label for target node IP
+ LabelTargetNodeIP = "target_node_ip"
+
+ // LabelTargetNodeName is the label for target node name
+ LabelTargetNodeName = "target_node_name"
+
+ // LabelTargetNodeType is the label for target node type (local_node, remote_intra_cluster, vs remote_inter_cluster)
+ LabelTargetNodeType = "target_node_type"
+
+ LabelLocationLocalNode = "local_node"
+ LabelLocationRemoteIntraCluster = "remote_intra_cluster"
+ LabelLocationRemoteInterCluster = "remote_inter_cluster"
+
+ // LabelType is the label for type in general (e.g. endpoint, node)
+ LabelType = "type"
+ LabelPeerEndpoint = "endpoint"
+ LabelPeerNode = "node"
+
+ LabelTrafficHTTP = "http"
+ LabelTrafficICMP = "icmp"
+
+ LabelAddressType = "address_type"
+ LabelAddressTypePrimary = "primary"
+ LabelAddressTypeSecondary = "secondary"
+)
+
+var (
+ // Namespace is used to scope metrics from cilium. It is prepended to metric
+ // names and separated with a '_'
+ Namespace = CiliumAgentNamespace
+
+ registryResolver, registry = promise.New[*Registry]()
+
+ BPFMapPressure = true
+
+ // BootstrapTimes is the durations of cilium-agent bootstrap sequence.
+ BootstrapTimes = NoOpObserverVec
+
+ // APIInteractions is the total time taken to process an API call made
+ // to the cilium-agent
+ APIInteractions = NoOpObserverVec
+
+ // Status
+
+ // NodeConnectivityStatus is the connectivity status between local node to
+ // other node intra or inter cluster.
+ NodeConnectivityStatus = NoOpGaugeDeletableVec
+
+ // NodeConnectivityLatency is the connectivity latency between local node to
+ // other node intra or inter cluster.
+ NodeConnectivityLatency = NoOpGaugeDeletableVec
+
+ // Endpoint
+
+ // Endpoint is a function used to collect this metric.
+ // It must be thread-safe.
+ Endpoint metric.GaugeFunc
+
+ // EndpointMaxIfindex is the maximum observed interface index for existing endpoints
+ EndpointMaxIfindex = NoOpGauge
+
+ // EndpointRegenerationTotal is a count of the number of times any endpoint
+ // has been regenerated and success/fail outcome
+ EndpointRegenerationTotal = NoOpCounterVec
+
+ // EndpointStateCount is the total count of the endpoints in various states.
+ EndpointStateCount = NoOpGaugeVec
+
+ // EndpointRegenerationTimeStats is the total time taken to regenerate
+ // endpoints, labeled by span name and status ("success" or "failure")
+ EndpointRegenerationTimeStats = NoOpObserverVec
+
+ // EndpointPropagationDelay is the delay between creation of local CiliumEndpoint
+ // and update for that CiliumEndpoint received through CiliumEndpointSlice.
+ // Measure of local CEP roundtrip time with CiliumEndpointSlice feature enabled.
+ EndpointPropagationDelay = NoOpObserverVec
+
+ // Policy
+ // Policy is the number of policies loaded into the agent
+ Policy = NoOpGauge
+
+ // PolicyRegenerationCount is the total number of successful policy
+ // regenerations.
+ PolicyRegenerationCount = NoOpCounter
+
+ // PolicyRegenerationTimeStats is the total time taken to generate policies
+ PolicyRegenerationTimeStats = NoOpObserverVec
+
+ // PolicyRevision is the current policy revision number for this agent
+ PolicyRevision = NoOpGauge
+
+ // PolicyChangeTotal is a count of policy changes by outcome ("success" or
+ // "failure")
+ PolicyChangeTotal = NoOpCounterVec
+
+ // PolicyEndpointStatus is the number of endpoints with policy labeled by enforcement type
+ PolicyEndpointStatus = NoOpGaugeVec
+
+ // PolicyImplementationDelay is a distribution of times taken from adding a
+ // policy (and incrementing the policy revision) to seeing it in the datapath
+ // per Endpoint. This reflects the actual delay perceived by traffic flowing
+ // through the datapath. The longest times will roughly correlate with the
+ // time taken to fully deploy an endpoint.
+ PolicyImplementationDelay = NoOpObserverVec
+
+ // CIDRGroup
+
+ // CIDRGroupsReferenced is the number of CNPs and CCNPs referencing at least one CiliumCIDRGroup.
+ // CNPs with empty or non-existing CIDRGroupRefs are not considered.
+ CIDRGroupsReferenced = NoOpGauge
+
+ // CIDRGroupTranslationTimeStats is the time taken to translate the policy field `FromCIDRGroupRef`
+ // after the referenced CIDRGroups have been updated or deleted.
+ CIDRGroupTranslationTimeStats = NoOpHistogram
+
+ // Identity
+
+ // Identity is the number of identities currently in use on the node by type
+ Identity = NoOpGaugeVec
+
+ // Events
+
+	// EventTS* is the time in seconds since epoch that we last received an
+	// event that we will handle
+	// source is one of k8s, docker or api
+
+ // EventTS is the timestamp of k8s resource events.
+ EventTS = NoOpGaugeVec
+
+ // EventLagK8s is the lag calculation for k8s Pod events.
+ EventLagK8s = NoOpGauge
+
+ // L7 statistics
+
+ // ProxyRedirects is the number of redirects labeled by protocol
+ ProxyRedirects = NoOpGaugeVec
+
+ // ProxyPolicyL7Total is a count of all l7 requests handled by proxy
+ ProxyPolicyL7Total = NoOpCounterVec
+
+ // ProxyUpstreamTime is how long the upstream server took to reply labeled
+ // by error, protocol and span time
+ ProxyUpstreamTime = NoOpObserverVec
+
+ // ProxyDatapathUpdateTimeout is a count of all the timeouts encountered while
+ // updating the datapath due to an FQDN IP update
+ ProxyDatapathUpdateTimeout = NoOpCounter
+
+ // L3-L4 statistics
+
+ // DropCount is the total drop requests,
+ // tagged by drop reason and direction(ingress/egress)
+ DropCount = NoOpCounterVec
+
+ // DropBytes is the total dropped bytes,
+ // tagged by drop reason and direction(ingress/egress)
+ DropBytes = NoOpCounterVec
+
+ // ForwardCount is the total forwarded packets,
+ // tagged by ingress/egress direction
+ ForwardCount = NoOpCounterVec
+
+ // ForwardBytes is the total forwarded bytes,
+ // tagged by ingress/egress direction
+ ForwardBytes = NoOpCounterVec
+
+ // Datapath statistics
+
+ // ConntrackGCRuns is the number of times that the conntrack GC
+ // process was run.
+ ConntrackGCRuns = NoOpCounterVec
+
+ // ConntrackGCKeyFallbacks number of times that the conntrack key fallback was invalid.
+ ConntrackGCKeyFallbacks = NoOpCounterVec
+
+ // ConntrackGCSize the number of entries in the conntrack table
+ ConntrackGCSize = NoOpGaugeVec
+
+ // NatGCSize the number of entries in the nat table
+ NatGCSize = NoOpGaugeVec
+
+ // ConntrackGCDuration the duration of the conntrack GC process in milliseconds.
+ ConntrackGCDuration = NoOpObserverVec
+
+	// ConntrackDumpResets marks the count for conntrack dump resets
+ ConntrackDumpResets = NoOpCounterVec
+
+ // Signals
+
+ // SignalsHandled is the number of signals received.
+ SignalsHandled = NoOpCounterVec
+
+ // Services
+
+ // ServicesEventsCount counts the number of services
+ ServicesEventsCount = NoOpCounterVec
+
+ // Errors and warnings
+
+ // ErrorsWarnings is the number of errors and warnings in cilium-agent instances
+ ErrorsWarnings = NoOpCounterVec
+
+ // ControllerRuns is the number of times that a controller process runs.
+ ControllerRuns = NoOpCounterVec
+
+ // ControllerRunsDuration the duration of the controller process in seconds
+ ControllerRunsDuration = NoOpObserverVec
+
+ // subprocess, labeled by Subsystem
+ SubprocessStart = NoOpCounterVec
+
+ // Kubernetes Events
+
+ // KubernetesEventProcessed is the number of Kubernetes events
+ // processed labeled by scope, action and execution result
+ KubernetesEventProcessed = NoOpCounterVec
+
+ // KubernetesEventReceived is the number of Kubernetes events received
+ // labeled by scope, action, valid data and equalness.
+ KubernetesEventReceived = NoOpCounterVec
+
+ // Kubernetes interactions
+
+ // KubernetesAPIInteractions is the total time taken to process an API call made
+ // to the kube-apiserver
+ KubernetesAPIInteractions = NoOpObserverVec
+
+ // KubernetesAPIRateLimiterLatency is the client side rate limiter latency metric
+ KubernetesAPIRateLimiterLatency = NoOpObserverVec
+
+ // KubernetesAPICallsTotal is the counter for all API calls made to
+ // kube-apiserver.
+ KubernetesAPICallsTotal = NoOpCounterVec
+
+ // KubernetesCNPStatusCompletion is the number of seconds it takes to
+ // complete a CNP status update
+ KubernetesCNPStatusCompletion = NoOpObserverVec
+
+ // TerminatingEndpointsEvents is the number of terminating endpoint events received from kubernetes.
+ TerminatingEndpointsEvents = NoOpCounter
+
+ // IPAM events
+
+ // IPAMEvent is the number of IPAM events received labeled by action and
+ // datapath family type
+ IPAMEvent = NoOpCounterVec
+
+ // IPAMCapacity tracks the total number of IPs that could be allocated. To
+ // get the current number of available IPs, it would be this metric
+ // subtracted by IPAMEvent{allocated}.
+ IPAMCapacity = NoOpGaugeVec
+
+ // KVstore events
+
+ // KVStoreOperationsDuration records the duration of kvstore operations
+ KVStoreOperationsDuration = NoOpObserverVec
+
+ // KVStoreEventsQueueDuration records the duration in seconds of time
+ // received event was blocked before it could be queued
+ KVStoreEventsQueueDuration = NoOpObserverVec
+
+ // KVStoreQuorumErrors records the number of kvstore quorum errors
+ KVStoreQuorumErrors = NoOpCounterVec
+
+ // FQDNGarbageCollectorCleanedTotal is the number of domains cleaned by the
+ // GC job.
+ FQDNGarbageCollectorCleanedTotal = NoOpCounter
+
+ // FQDNActiveNames is the number of domains inside the DNS cache that have
+ // not expired (by TTL), per endpoint.
+ FQDNActiveNames = NoOpGaugeVec
+
+ // FQDNActiveIPs is the number of IPs inside the DNS cache associated with
+ // a domain that has not expired (by TTL) and are currently active, per
+ // endpoint.
+ FQDNActiveIPs = NoOpGaugeVec
+
+	// FQDNAliveZombieConnections is the number of IPs associated with domains
+ // that have expired (by TTL) yet still associated with an active
+ // connection (aka zombie), per endpoint.
+ FQDNAliveZombieConnections = NoOpGaugeVec
+
+ // FQDNSemaphoreRejectedTotal is the total number of DNS requests rejected
+ // by the DNS proxy because too many requests were in flight, as enforced by
+ // the admission semaphore.
+ FQDNSemaphoreRejectedTotal = NoOpCounter
+
+ // IPCacheErrorsTotal is the total number of IPCache events handled in
+ // the IPCache subsystem that resulted in errors.
+ IPCacheErrorsTotal = NoOpCounterVec
+
+ // IPCacheEventsTotal is the total number of IPCache events handled in
+ // the IPCache subsystem.
+ IPCacheEventsTotal = NoOpCounterVec
+
+ // BPFSyscallDuration is the metric for bpf syscalls duration.
+ BPFSyscallDuration = NoOpObserverVec
+
+ // BPFMapOps is the metric to measure the number of operations done to a
+ // bpf map.
+ BPFMapOps = NoOpCounterVec
+
+ // BPFMapCapacity is the max capacity of bpf maps, labelled by map group classification.
+ BPFMapCapacity = NoOpGaugeVec
+
+ // TriggerPolicyUpdateTotal is the metric to count total number of
+ // policy update triggers
+ TriggerPolicyUpdateTotal = NoOpCounterVec
+
+ // TriggerPolicyUpdateFolds is the current level folding that is
+ // happening when running policy update triggers
+ TriggerPolicyUpdateFolds = NoOpGauge
+
+ // TriggerPolicyUpdateCallDuration measures the latency and call
+ // duration of policy update triggers
+ TriggerPolicyUpdateCallDuration = NoOpObserverVec
+
+ // VersionMetric labelled by Cilium version
+ VersionMetric = NoOpGaugeVec
+
+ // APILimiterWaitHistoryDuration is a histogram that measures the
+ // individual wait durations of API limiters
+ APILimiterWaitHistoryDuration = NoOpObserverVec
+
+ // APILimiterWaitDuration is the gauge of the current mean, min, and
+ // max wait duration
+ APILimiterWaitDuration = NoOpGaugeVec
+
+ // APILimiterProcessingDuration is the gauge of the mean and estimated
+ // processing duration
+ APILimiterProcessingDuration = NoOpGaugeVec
+
+ // APILimiterRequestsInFlight is the gauge of the current and max
+ // requests in flight
+ APILimiterRequestsInFlight = NoOpGaugeVec
+
+ // APILimiterRateLimit is the gauge of the current rate limiting
+ // configuration including limit and burst
+ APILimiterRateLimit = NoOpGaugeVec
+
+ // APILimiterAdjustmentFactor is the gauge representing the latest
+ // adjustment factor that was applied
+ APILimiterAdjustmentFactor = NoOpGaugeVec
+
+ // APILimiterProcessedRequests is the counter of the number of
+ // processed (successful and failed) requests
+ APILimiterProcessedRequests = NoOpCounterVec
+
+ // WorkQueueDepth is the depth of the workqueue
+ //
+ // We set actual metrics here instead of NoOp for the workqueue metrics
+ // because these metrics will be registered with workqueue.SetProvider
+ // by init function in watcher.go. Otherwise, we will register NoOps.
+ //
+ WorkQueueDepth = metric.NewGaugeVec(metric.GaugeOpts{
+ ConfigName: Namespace + "_" + SubsystemWorkQueue + "_depth",
+ Namespace: Namespace,
+ Subsystem: SubsystemWorkQueue,
+ Name: "depth",
+ Help: "Current depth of workqueue.",
+ }, []string{"name"})
+
+ // WorkQueueAddsTotal is the total number of adds to the workqueue
+ WorkQueueAddsTotal = metric.NewCounterVec(metric.CounterOpts{
+ ConfigName: Namespace + "_" + SubsystemWorkQueue + "_adds_total",
+ Namespace: Namespace,
+ Subsystem: SubsystemWorkQueue,
+ Name: "adds_total",
+ Help: "Total number of adds handled by workqueue.",
+ }, []string{"name"})
+
+ // WorkQueueLatency is the latency of how long an item stays in the workqueue
+ WorkQueueLatency = metric.NewHistogramVec(metric.HistogramOpts{
+ ConfigName: Namespace + "_" + SubsystemWorkQueue + "_queue_duration_seconds",
+ Namespace: Namespace,
+ Subsystem: SubsystemWorkQueue,
+ Name: "queue_duration_seconds",
+ Help: "How long in seconds an item stays in workqueue before being requested.",
+ Buckets: prometheus.ExponentialBuckets(10e-9, 10, 10),
+ }, []string{"name"})
+
+ // WorkQueueDuration is the duration of how long processing an item for the workqueue
+ WorkQueueDuration = metric.NewHistogramVec(metric.HistogramOpts{
+ ConfigName: Namespace + "_" + SubsystemWorkQueue + "_work_duration_seconds",
+ Namespace: Namespace,
+ Subsystem: SubsystemWorkQueue,
+ Name: "work_duration_seconds",
+ Help: "How long in seconds processing an item from workqueue takes.",
+ Buckets: prometheus.ExponentialBuckets(10e-9, 10, 10),
+ }, []string{"name"})
+
+ // WorkQueueUnfinishedWork is how many seconds of work has been done that is in progress
+ WorkQueueUnfinishedWork = metric.NewGaugeVec(metric.GaugeOpts{
+ ConfigName: Namespace + "_" + SubsystemWorkQueue + "_unfinished_work_seconds",
+ Namespace: Namespace,
+ Subsystem: SubsystemWorkQueue,
+ Name: "unfinished_work_seconds",
+ Help: "How many seconds of work has been done that " +
+ "is in progress and hasn't been observed by work_duration. Large " +
+ "values indicate stuck threads. One can deduce the number of stuck " +
+ "threads by observing the rate at which this increases.",
+ }, []string{"name"})
+
+ // WorkQueueLongestRunningProcessor is the longest running processor in the workqueue
+ WorkQueueLongestRunningProcessor = metric.NewGaugeVec(metric.GaugeOpts{
+ ConfigName: Namespace + "_" + SubsystemWorkQueue + "_longest_running_processor_seconds",
+ Namespace: Namespace,
+ Subsystem: SubsystemWorkQueue,
+ Name: "longest_running_processor_seconds",
+ Help: "How many seconds has the longest running " +
+ "processor for workqueue been running.",
+ }, []string{"name"})
+
+	// WorkQueueRetries is the number of retries handled by the workqueue
+ WorkQueueRetries = metric.NewCounterVec(metric.CounterOpts{
+ ConfigName: Namespace + "_" + SubsystemWorkQueue + "_retries_total",
+ Namespace: Namespace,
+ Subsystem: SubsystemWorkQueue,
+ Name: "retries_total",
+ Help: "Total number of retries handled by workqueue.",
+ }, []string{"name"})
+)
+
+ // LegacyMetrics bundles the agent's metric instances in a single struct.
+ // NewLegacyMetrics populates every field and also mirrors each one into the
+ // matching package-level metric variable for legacy callers.
+ type LegacyMetrics struct {
+ 	BootstrapTimes                   metric.Vec[metric.Observer]
+ 	APIInteractions                  metric.Vec[metric.Observer]
+ 	NodeConnectivityStatus           metric.DeletableVec[metric.Gauge]
+ 	NodeConnectivityLatency          metric.DeletableVec[metric.Gauge]
+ 	Endpoint                         metric.GaugeFunc
+ 	EndpointMaxIfindex               metric.Gauge
+ 	EndpointRegenerationTotal        metric.Vec[metric.Counter]
+ 	EndpointStateCount               metric.Vec[metric.Gauge]
+ 	EndpointRegenerationTimeStats    metric.Vec[metric.Observer]
+ 	EndpointPropagationDelay         metric.Vec[metric.Observer]
+ 	Policy                           metric.Gauge
+ 	PolicyRegenerationCount          metric.Counter
+ 	PolicyRegenerationTimeStats      metric.Vec[metric.Observer]
+ 	PolicyRevision                   metric.Gauge
+ 	PolicyChangeTotal                metric.Vec[metric.Counter]
+ 	PolicyEndpointStatus             metric.Vec[metric.Gauge]
+ 	PolicyImplementationDelay        metric.Vec[metric.Observer]
+ 	CIDRGroupsReferenced             metric.Gauge
+ 	CIDRGroupTranslationTimeStats    metric.Histogram
+ 	Identity                         metric.Vec[metric.Gauge]
+ 	EventTS                          metric.Vec[metric.Gauge]
+ 	EventLagK8s                      metric.Gauge
+ 	ProxyRedirects                   metric.Vec[metric.Gauge]
+ 	ProxyPolicyL7Total               metric.Vec[metric.Counter]
+ 	ProxyUpstreamTime                metric.Vec[metric.Observer]
+ 	ProxyDatapathUpdateTimeout       metric.Counter
+ 	DropCount                        metric.Vec[metric.Counter]
+ 	DropBytes                        metric.Vec[metric.Counter]
+ 	ForwardCount                     metric.Vec[metric.Counter]
+ 	ForwardBytes                     metric.Vec[metric.Counter]
+ 	ConntrackGCRuns                  metric.Vec[metric.Counter]
+ 	ConntrackGCKeyFallbacks          metric.Vec[metric.Counter]
+ 	ConntrackGCSize                  metric.Vec[metric.Gauge]
+ 	NatGCSize                        metric.Vec[metric.Gauge]
+ 	ConntrackGCDuration              metric.Vec[metric.Observer]
+ 	ConntrackDumpResets              metric.Vec[metric.Counter]
+ 	SignalsHandled                   metric.Vec[metric.Counter]
+ 	ServicesEventsCount              metric.Vec[metric.Counter]
+ 	ErrorsWarnings                   metric.Vec[metric.Counter]
+ 	ControllerRuns                   metric.Vec[metric.Counter]
+ 	ControllerRunsDuration           metric.Vec[metric.Observer]
+ 	SubprocessStart                  metric.Vec[metric.Counter]
+ 	KubernetesEventProcessed         metric.Vec[metric.Counter]
+ 	KubernetesEventReceived          metric.Vec[metric.Counter]
+ 	KubernetesAPIInteractions        metric.Vec[metric.Observer]
+ 	KubernetesAPIRateLimiterLatency  metric.Vec[metric.Observer]
+ 	KubernetesAPICallsTotal          metric.Vec[metric.Counter]
+ 	KubernetesCNPStatusCompletion    metric.Vec[metric.Observer]
+ 	TerminatingEndpointsEvents       metric.Counter
+ 	IPAMEvent                        metric.Vec[metric.Counter]
+ 	IPAMCapacity                     metric.Vec[metric.Gauge]
+ 	KVStoreOperationsDuration        metric.Vec[metric.Observer]
+ 	KVStoreEventsQueueDuration       metric.Vec[metric.Observer]
+ 	KVStoreQuorumErrors              metric.Vec[metric.Counter]
+ 	FQDNGarbageCollectorCleanedTotal metric.Counter
+ 	FQDNActiveNames                  metric.Vec[metric.Gauge]
+ 	FQDNActiveIPs                    metric.Vec[metric.Gauge]
+ 	FQDNAliveZombieConnections       metric.Vec[metric.Gauge]
+ 	FQDNSemaphoreRejectedTotal       metric.Counter
+ 	IPCacheErrorsTotal               metric.Vec[metric.Counter]
+ 	IPCacheEventsTotal               metric.Vec[metric.Counter]
+ 	BPFSyscallDuration               metric.Vec[metric.Observer]
+ 	BPFMapOps                        metric.Vec[metric.Counter]
+ 	BPFMapCapacity                   metric.Vec[metric.Gauge]
+ 	TriggerPolicyUpdateTotal         metric.Vec[metric.Counter]
+ 	TriggerPolicyUpdateFolds         metric.Gauge
+ 	TriggerPolicyUpdateCallDuration  metric.Vec[metric.Observer]
+ 	VersionMetric                    metric.Vec[metric.Gauge]
+ 	APILimiterWaitHistoryDuration    metric.Vec[metric.Observer]
+ 	APILimiterWaitDuration           metric.Vec[metric.Gauge]
+ 	APILimiterProcessingDuration     metric.Vec[metric.Gauge]
+ 	APILimiterRequestsInFlight       metric.Vec[metric.Gauge]
+ 	APILimiterRateLimit              metric.Vec[metric.Gauge]
+ 	APILimiterAdjustmentFactor       metric.Vec[metric.Gauge]
+ 	APILimiterProcessedRequests      metric.Vec[metric.Counter]
+
+ 	// The workqueue fields below are not constructed by NewLegacyMetrics;
+ 	// they alias the pre-existing package-level workqueue collectors.
+ 	WorkQueueDepth                   metric.Vec[metric.Gauge]
+ 	WorkQueueAddsTotal               metric.Vec[metric.Counter]
+ 	WorkQueueLatency                 metric.Vec[metric.Observer]
+ 	WorkQueueDuration                metric.Vec[metric.Observer]
+ 	WorkQueueUnfinishedWork          metric.Vec[metric.Gauge]
+ 	WorkQueueLongestRunningProcessor metric.Vec[metric.Gauge]
+ 	WorkQueueRetries                 metric.Vec[metric.Counter]
+ }
+
+ // NewLegacyMetrics constructs all legacy metric instances, seeds the version
+ // and default BPF map-capacity label values, and mirrors every field into the
+ // corresponding package-level metric variable so existing callers keep
+ // working. The WorkQueue* fields alias the package-level workqueue
+ // collectors rather than creating new ones.
+ //
+ // Fix: the CIDRGroupsReferenced and CIDRGroupTranslationTimeStats
+ // ConfigNames were missing the "_" separator between the namespace and the
+ // metric name (producing "ciliumcidrgroups_referenced"), inconsistent with
+ // every other metric here.
+ func NewLegacyMetrics() *LegacyMetrics {
+ 	lm := &LegacyMetrics{
+ 		BootstrapTimes: metric.NewHistogramVec(metric.HistogramOpts{
+ 			ConfigName: Namespace + "_" + SubsystemAgent + "_bootstrap_seconds",
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemAgent,
+ 			Name:       "bootstrap_seconds",
+ 			Help:       "Duration of bootstrap sequence",
+ 		}, []string{LabelScope, LabelOutcome}),
+
+ 		APIInteractions: metric.NewHistogramVec(metric.HistogramOpts{
+ 			ConfigName: Namespace + "_" + SubsystemAgent + "_api_process_time_seconds",
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemAgent,
+ 			Name:       "api_process_time_seconds",
+ 			Help:       "Duration of processed API calls labeled by path, method and return code.",
+ 		}, []string{LabelPath, LabelMethod, LabelAPIReturnCode}),
+
+ 		EndpointRegenerationTotal: metric.NewCounterVec(metric.CounterOpts{
+ 			ConfigName: Namespace + "_endpoint_regenerations_total",
+ 			Namespace:  Namespace,
+ 			Name:       "endpoint_regenerations_total",
+ 			Help:       "Count of all endpoint regenerations that have completed, tagged by outcome",
+ 		}, []string{"outcome"}),
+
+ 		EndpointStateCount: metric.NewGaugeVec(metric.GaugeOpts{
+ 			ConfigName: Namespace + "_endpoint_state",
+ 			Namespace:  Namespace,
+ 			Name:       "endpoint_state",
+ 			Help:       "Count of all endpoints, tagged by different endpoint states",
+ 		},
+ 			[]string{"endpoint_state"},
+ 		),
+
+ 		EndpointRegenerationTimeStats: metric.NewHistogramVec(metric.HistogramOpts{
+ 			ConfigName: Namespace + "_endpoint_regeneration_time_stats_seconds",
+ 			Namespace:  Namespace,
+ 			Name:       "endpoint_regeneration_time_stats_seconds",
+ 			Help:       "Endpoint regeneration time stats labeled by the scope",
+ 		}, []string{LabelScope, LabelStatus}),
+
+ 		Policy: metric.NewGauge(metric.GaugeOpts{
+ 			ConfigName: Namespace + "_policy",
+ 			Namespace:  Namespace,
+ 			Name:       "policy",
+ 			Help:       "Number of policies currently loaded",
+ 		}),
+
+ 		PolicyRegenerationCount: metric.NewCounter(metric.CounterOpts{
+ 			ConfigName: Namespace + "_policy_regeneration_total",
+ 			Namespace:  Namespace,
+ 			Name:       "policy_regeneration_total",
+ 			Help:       "Total number of successful policy regenerations",
+ 		}),
+
+ 		PolicyRegenerationTimeStats: metric.NewHistogramVec(metric.HistogramOpts{
+ 			ConfigName: Namespace + "_policy_regeneration_time_stats_seconds",
+ 			Namespace:  Namespace,
+ 			Name:       "policy_regeneration_time_stats_seconds",
+ 			Help:       "Policy regeneration time stats labeled by the scope",
+ 		}, []string{LabelScope, LabelStatus}),
+
+ 		PolicyRevision: metric.NewGauge(metric.GaugeOpts{
+ 			ConfigName: Namespace + "_policy_max_revision",
+ 			Namespace:  Namespace,
+ 			Name:       "policy_max_revision",
+ 			Help:       "Highest policy revision number in the agent",
+ 		}),
+
+ 		PolicyChangeTotal: metric.NewCounterVec(metric.CounterOpts{
+ 			ConfigName: Namespace + "_policy_change_total",
+ 			Namespace:  Namespace,
+ 			Name:       "policy_change_total",
+ 			Help:       "Number of policy changes by outcome",
+ 		}, []string{"outcome"}),
+
+ 		PolicyEndpointStatus: metric.NewGaugeVec(metric.GaugeOpts{
+ 			ConfigName: Namespace + "_policy_endpoint_enforcement_status",
+ 			Namespace:  Namespace,
+ 			Name:       "policy_endpoint_enforcement_status",
+ 			Help:       "Number of endpoints labeled by policy enforcement status",
+ 		}, []string{LabelPolicyEnforcement}),
+
+ 		PolicyImplementationDelay: metric.NewHistogramVec(metric.HistogramOpts{
+ 			ConfigName: Namespace + "_policy_implementation_delay",
+ 			Namespace:  Namespace,
+ 			Name:       "policy_implementation_delay",
+ 			Help:       "Time between a policy change and it being fully deployed into the datapath",
+ 		}, []string{LabelPolicySource}),
+
+ 		CIDRGroupsReferenced: metric.NewGauge(metric.GaugeOpts{
+ 			// "_" separator was previously missing here, which produced the
+ 			// config name "ciliumcidrgroups_referenced".
+ 			ConfigName: Namespace + "_cidrgroups_referenced",
+ 			Namespace:  Namespace,
+ 			Name:       "cidrgroups_referenced",
+ 			Help:       "Number of CNPs and CCNPs referencing at least one CiliumCIDRGroup. CNPs with empty or non-existing CIDRGroupRefs are not considered",
+ 		}),
+
+ 		CIDRGroupTranslationTimeStats: metric.NewHistogram(metric.HistogramOpts{
+ 			// "_" separator was previously missing here as well.
+ 			ConfigName: Namespace + "_cidrgroup_translation_time_stats_seconds",
+ 			Disabled:   true,
+ 			Namespace:  Namespace,
+ 			Name:       "cidrgroup_translation_time_stats_seconds",
+ 			Help:       "CIDRGroup translation time stats",
+ 		}),
+
+ 		Identity: metric.NewGaugeVec(metric.GaugeOpts{
+ 			ConfigName: Namespace + "_identity",
+ 			Namespace:  Namespace,
+ 			Name:       "identity",
+ 			Help:       "Number of identities currently allocated",
+ 		}, []string{LabelType}),
+
+ 		EventTS: metric.NewGaugeVec(metric.GaugeOpts{
+ 			ConfigName: Namespace + "_event_ts",
+ 			Namespace:  Namespace,
+ 			Name:       "event_ts",
+ 			Help:       "Last timestamp when we received an event",
+ 		}, []string{LabelEventSource, LabelScope, LabelAction}),
+
+ 		EventLagK8s: metric.NewGauge(metric.GaugeOpts{
+ 			ConfigName:  Namespace + "_k8s_event_lag_seconds",
+ 			Disabled:    true,
+ 			Namespace:   Namespace,
+ 			Name:        "k8s_event_lag_seconds",
+ 			Help:        "Lag for Kubernetes events - computed value between receiving a CNI ADD event from kubelet and a Pod event received from kube-api-server",
+ 			ConstLabels: prometheus.Labels{"source": LabelEventSourceK8s},
+ 		}),
+
+ 		ProxyRedirects: metric.NewGaugeVec(metric.GaugeOpts{
+ 			ConfigName: Namespace + "_proxy_redirects",
+ 			Namespace:  Namespace,
+ 			Name:       "proxy_redirects",
+ 			Help:       "Number of redirects installed for endpoints, labeled by protocol",
+ 		}, []string{LabelProtocolL7}),
+
+ 		ProxyPolicyL7Total: metric.NewCounterVec(metric.CounterOpts{
+ 			ConfigName: Namespace + "_policy_l7_total",
+ 			Namespace:  Namespace,
+ 			Name:       "policy_l7_total",
+ 			Help:       "Number of total proxy requests handled",
+ 		}, []string{"rule", "proxy_type"}),
+
+ 		ProxyUpstreamTime: metric.NewHistogramVec(metric.HistogramOpts{
+ 			ConfigName: Namespace + "_proxy_upstream_reply_seconds",
+ 			Namespace:  Namespace,
+ 			Name:       "proxy_upstream_reply_seconds",
+ 			Help:       "Seconds waited to get a reply from a upstream server",
+ 		}, []string{"error", LabelProtocolL7, LabelScope}),
+
+ 		ProxyDatapathUpdateTimeout: metric.NewCounter(metric.CounterOpts{
+ 			ConfigName: Namespace + "_proxy_datapath_update_timeout_total",
+ 			Disabled:   true,
+ 			Namespace:  Namespace,
+ 			Name:       "proxy_datapath_update_timeout_total",
+ 			Help:       "Number of total datapath update timeouts due to FQDN IP updates",
+ 		}),
+
+ 		DropCount: metric.NewCounterVec(metric.CounterOpts{
+ 			ConfigName: Namespace + "_drop_count_total",
+ 			Namespace:  Namespace,
+ 			Name:       "drop_count_total",
+ 			Help:       "Total dropped packets, tagged by drop reason and ingress/egress direction",
+ 		},
+ 			[]string{"reason", LabelDirection}),
+
+ 		DropBytes: metric.NewCounterVec(metric.CounterOpts{
+ 			ConfigName: Namespace + "_drop_bytes_total",
+ 			Namespace:  Namespace,
+ 			Name:       "drop_bytes_total",
+ 			Help:       "Total dropped bytes, tagged by drop reason and ingress/egress direction",
+ 		},
+ 			[]string{"reason", LabelDirection}),
+
+ 		ForwardCount: metric.NewCounterVec(metric.CounterOpts{
+ 			ConfigName: Namespace + "_forward_count_total",
+ 			Namespace:  Namespace,
+ 			Name:       "forward_count_total",
+ 			Help:       "Total forwarded packets, tagged by ingress/egress direction",
+ 		},
+ 			[]string{LabelDirection}),
+
+ 		ForwardBytes: metric.NewCounterVec(metric.CounterOpts{
+ 			ConfigName: Namespace + "_forward_bytes_total",
+ 			Namespace:  Namespace,
+ 			Name:       "forward_bytes_total",
+ 			Help:       "Total forwarded bytes, tagged by ingress/egress direction",
+ 		},
+ 			[]string{LabelDirection}),
+
+ 		ConntrackGCRuns: metric.NewCounterVec(metric.CounterOpts{
+ 			ConfigName: Namespace + "_" + SubsystemDatapath + "_conntrack_gc_runs_total",
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemDatapath,
+ 			Name:       "conntrack_gc_runs_total",
+ 			Help: "Number of times that the conntrack garbage collector process was run " +
+ 				"labeled by completion status",
+ 		}, []string{LabelDatapathFamily, LabelProtocol, LabelStatus}),
+
+ 		ConntrackGCKeyFallbacks: metric.NewCounterVec(metric.CounterOpts{
+ 			ConfigName: Namespace + "_" + SubsystemDatapath + "_conntrack_gc_key_fallbacks_total",
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemDatapath,
+ 			Name:       "conntrack_gc_key_fallbacks_total",
+ 			Help:       "Number of times a key fallback was needed when iterating over the BPF map",
+ 		}, []string{LabelDatapathFamily, LabelProtocol}),
+
+ 		ConntrackGCSize: metric.NewGaugeVec(metric.GaugeOpts{
+ 			ConfigName: Namespace + "_" + SubsystemDatapath + "_conntrack_gc_entries",
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemDatapath,
+ 			Name:       "conntrack_gc_entries",
+ 			Help: "The number of alive and deleted conntrack entries at the end " +
+ 				"of a garbage collector run labeled by datapath family.",
+ 		}, []string{LabelDatapathFamily, LabelProtocol, LabelStatus}),
+
+ 		NatGCSize: metric.NewGaugeVec(metric.GaugeOpts{
+ 			ConfigName: Namespace + "_" + SubsystemDatapath + "_nat_gc_entries",
+ 			Disabled:   true,
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemDatapath,
+ 			Name:       "nat_gc_entries",
+ 			Help: "The number of alive and deleted nat entries at the end " +
+ 				"of a garbage collector run labeled by datapath family.",
+ 		}, []string{LabelDatapathFamily, LabelDirection, LabelStatus}),
+
+ 		ConntrackGCDuration: metric.NewHistogramVec(metric.HistogramOpts{
+ 			ConfigName: Namespace + "_" + SubsystemDatapath + "_conntrack_gc_duration_seconds",
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemDatapath,
+ 			Name:       "conntrack_gc_duration_seconds",
+ 			Help: "Duration in seconds of the garbage collector process " +
+ 				"labeled by datapath family and completion status",
+ 		}, []string{LabelDatapathFamily, LabelProtocol, LabelStatus}),
+
+ 		ConntrackDumpResets: metric.NewCounterVec(metric.CounterOpts{
+ 			ConfigName: Namespace + "_" + SubsystemDatapath + "_conntrack_dump_resets_total",
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemDatapath,
+ 			Name:       "conntrack_dump_resets_total",
+ 			Help:       "Number of conntrack dump resets. Happens when a BPF entry gets removed while dumping the map is in progress",
+ 		}, []string{LabelDatapathArea, LabelDatapathName, LabelDatapathFamily}),
+
+ 		SignalsHandled: metric.NewCounterVec(metric.CounterOpts{
+ 			ConfigName: Namespace + "_" + SubsystemDatapath + "_signals_handled_total",
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemDatapath,
+ 			Name:       "signals_handled_total",
+ 			Help: "Number of times that the datapath signal handler process was run " +
+ 				"labeled by signal type, data and completion status",
+ 		}, []string{LabelSignalType, LabelSignalData, LabelStatus}),
+
+ 		ServicesEventsCount: metric.NewCounterVec(metric.CounterOpts{
+ 			ConfigName: Namespace + "_services_events_total",
+ 			Namespace:  Namespace,
+ 			Name:       "services_events_total",
+ 			Help:       "Number of services events labeled by action type",
+ 		}, []string{LabelAction}),
+
+ 		ErrorsWarnings: metric.NewCounterVec(metric.CounterOpts{
+ 			ConfigName: Namespace + "_errors_warnings_total",
+ 			Namespace:  Namespace,
+ 			Name:       "errors_warnings_total",
+ 			Help:       "Number of total errors in cilium-agent instances",
+ 		}, []string{"level", "subsystem"}),
+
+ 		ControllerRuns: metric.NewCounterVec(metric.CounterOpts{
+ 			ConfigName: Namespace + "_controllers_runs_total",
+ 			Namespace:  Namespace,
+ 			Name:       "controllers_runs_total",
+ 			Help:       "Number of times that a controller process was run labeled by completion status",
+ 		}, []string{LabelStatus}),
+
+ 		ControllerRunsDuration: metric.NewHistogramVec(metric.HistogramOpts{
+ 			ConfigName: Namespace + "_controllers_runs_duration_seconds",
+ 			Namespace:  Namespace,
+ 			Name:       "controllers_runs_duration_seconds",
+ 			Help:       "Duration in seconds of the controller process labeled by completion status",
+ 		}, []string{LabelStatus}),
+
+ 		SubprocessStart: metric.NewCounterVec(metric.CounterOpts{
+ 			ConfigName: Namespace + "_subprocess_start_total",
+ 			Namespace:  Namespace,
+ 			Name:       "subprocess_start_total",
+ 			Help:       "Number of times that Cilium has started a subprocess, labeled by subsystem",
+ 		}, []string{LabelSubsystem}),
+
+ 		KubernetesEventProcessed: metric.NewCounterVec(metric.CounterOpts{
+ 			ConfigName: Namespace + "_kubernetes_events_total",
+ 			Namespace:  Namespace,
+ 			Name:       "kubernetes_events_total",
+ 			Help:       "Number of Kubernetes events processed labeled by scope, action and execution result",
+ 		}, []string{LabelScope, LabelAction, LabelStatus}),
+
+ 		KubernetesEventReceived: metric.NewCounterVec(metric.CounterOpts{
+ 			ConfigName: Namespace + "_kubernetes_events_received_total",
+ 			Namespace:  Namespace,
+ 			Name:       "kubernetes_events_received_total",
+ 			Help:       "Number of Kubernetes events received labeled by scope, action, valid data and equalness",
+ 		}, []string{LabelScope, LabelAction, "valid", "equal"}),
+
+ 		KubernetesAPIInteractions: metric.NewHistogramVec(metric.HistogramOpts{
+ 			ConfigName: Namespace + "_" + SubsystemK8sClient + "_api_latency_time_seconds",
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemK8sClient,
+ 			Name:       "api_latency_time_seconds",
+ 			Help:       "Duration of processed API calls labeled by path and method.",
+ 		}, []string{LabelPath, LabelMethod}),
+
+ 		KubernetesAPIRateLimiterLatency: metric.NewHistogramVec(metric.HistogramOpts{
+ 			ConfigName: Namespace + "_" + SubsystemK8sClient + "_rate_limiter_duration_seconds",
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemK8sClient,
+ 			Name:       "rate_limiter_duration_seconds",
+ 			Help:       "Kubernetes client rate limiter latency in seconds. Broken down by path and method.",
+ 			Buckets:    []float64{0.005, 0.025, 0.1, 0.25, 0.5, 1.0, 2.0, 4.0, 8.0, 15.0, 30.0, 60.0},
+ 		}, []string{LabelPath, LabelMethod}),
+
+ 		KubernetesAPICallsTotal: metric.NewCounterVec(metric.CounterOpts{
+ 			ConfigName: Namespace + "_" + SubsystemK8sClient + "_api_calls_total",
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemK8sClient,
+ 			Name:       "api_calls_total",
+ 			Help:       "Number of API calls made to kube-apiserver labeled by host, method and return code.",
+ 		}, []string{"host", LabelMethod, LabelAPIReturnCode}),
+
+ 		KubernetesCNPStatusCompletion: metric.NewHistogramVec(metric.HistogramOpts{
+ 			ConfigName: Namespace + "_" + SubsystemK8s + "_cnp_status_completion_seconds",
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemK8s,
+ 			Name:       "cnp_status_completion_seconds",
+ 			Help:       "Duration in seconds in how long it took to complete a CNP status update",
+ 		}, []string{LabelAttempts, LabelOutcome}),
+
+ 		TerminatingEndpointsEvents: metric.NewCounter(metric.CounterOpts{
+ 			ConfigName: Namespace + "_" + SubsystemK8s + "_terminating_endpoints_events_total",
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemK8s,
+ 			Name:       "terminating_endpoints_events_total",
+ 			Help:       "Number of terminating endpoint events received from Kubernetes",
+ 		}),
+
+ 		IPAMEvent: metric.NewCounterVec(metric.CounterOpts{
+ 			ConfigName: Namespace + "_ipam_events_total",
+ 			Namespace:  Namespace,
+ 			Name:       "ipam_events_total",
+ 			Help:       "Number of IPAM events received labeled by action and datapath family type",
+ 		}, []string{LabelAction, LabelDatapathFamily}),
+
+ 		IPAMCapacity: metric.NewGaugeVec(metric.GaugeOpts{
+ 			ConfigName: Namespace + "_ipam_capacity",
+ 			Namespace:  Namespace,
+ 			Name:       "ipam_capacity",
+ 			Help:       "Total number of IPs in the IPAM pool labeled by family",
+ 		}, []string{LabelDatapathFamily}),
+
+ 		KVStoreOperationsDuration: metric.NewHistogramVec(metric.HistogramOpts{
+ 			ConfigName: Namespace + "_" + SubsystemKVStore + "_operations_duration_seconds",
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemKVStore,
+ 			Name:       "operations_duration_seconds",
+ 			Help:       "Duration in seconds of kvstore operations",
+ 		}, []string{LabelScope, LabelKind, LabelAction, LabelOutcome}),
+
+ 		KVStoreEventsQueueDuration: metric.NewHistogramVec(metric.HistogramOpts{
+ 			ConfigName: Namespace + "_" + SubsystemKVStore + "_events_queue_seconds",
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemKVStore,
+ 			Name:       "events_queue_seconds",
+ 			Help:       "Seconds waited before a received event was queued",
+ 			Buckets:    []float64{.002, .005, .01, .015, .025, .05, .1, .25, .5, .75, 1},
+ 		}, []string{LabelScope, LabelAction}),
+
+ 		KVStoreQuorumErrors: metric.NewCounterVec(metric.CounterOpts{
+ 			ConfigName: Namespace + "_" + SubsystemKVStore + "_quorum_errors_total",
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemKVStore,
+ 			Name:       "quorum_errors_total",
+ 			Help:       "Number of quorum errors",
+ 		}, []string{LabelError}),
+
+ 		IPCacheErrorsTotal: metric.NewCounterVec(metric.CounterOpts{
+ 			ConfigName: Namespace + "_" + SubsystemIPCache + "_errors_total",
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemIPCache,
+ 			Name:       "errors_total",
+ 			Help:       "Number of errors interacting with the IP to Identity cache",
+ 		}, []string{LabelType, LabelError}),
+
+ 		IPCacheEventsTotal: metric.NewCounterVec(metric.CounterOpts{
+ 			ConfigName: Namespace + "_" + SubsystemIPCache + "_events_total",
+ 			Disabled:   true,
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemIPCache,
+ 			Name:       "events_total",
+ 			Help:       "Number of events interacting with the IP to Identity cache",
+ 		}, []string{LabelType}),
+
+ 		FQDNGarbageCollectorCleanedTotal: metric.NewCounter(metric.CounterOpts{
+ 			ConfigName: Namespace + "_" + SubsystemFQDN + "_gc_deletions_total",
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemFQDN,
+ 			Name:       "gc_deletions_total",
+ 			Help:       "Number of FQDNs that have been cleaned on FQDN Garbage collector job",
+ 		}),
+
+ 		FQDNActiveNames: metric.NewGaugeVec(metric.GaugeOpts{
+ 			ConfigName: Namespace + "_" + SubsystemFQDN + "_active_names",
+ 			Disabled:   true,
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemFQDN,
+ 			Name:       "active_names",
+ 			Help:       "Number of domains inside the DNS cache that have not expired (by TTL), per endpoint",
+ 		}, []string{LabelPeerEndpoint}),
+
+ 		FQDNActiveIPs: metric.NewGaugeVec(metric.GaugeOpts{
+ 			ConfigName: Namespace + "_" + SubsystemFQDN + "_active_ips",
+ 			Disabled:   true,
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemFQDN,
+ 			Name:       "active_ips",
+ 			Help:       "Number of IPs inside the DNS cache associated with a domain that has not expired (by TTL), per endpoint",
+ 		}, []string{LabelPeerEndpoint}),
+
+ 		FQDNAliveZombieConnections: metric.NewGaugeVec(metric.GaugeOpts{
+ 			ConfigName: Namespace + "_" + SubsystemFQDN + "_alive_zombie_connections",
+ 			Disabled:   true,
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemFQDN,
+ 			Name:       "alive_zombie_connections",
+ 			Help:       "Number of IPs associated with domains that have expired (by TTL) yet still associated with an active connection (aka zombie), per endpoint",
+ 		}, []string{LabelPeerEndpoint}),
+
+ 		FQDNSemaphoreRejectedTotal: metric.NewCounter(metric.CounterOpts{
+ 			ConfigName: Namespace + "_" + SubsystemFQDN + "_semaphore_rejected_total",
+ 			Disabled:   true,
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemFQDN,
+ 			Name:       "semaphore_rejected_total",
+ 			Help:       "Number of DNS request rejected by the DNS Proxy's admission semaphore",
+ 		}),
+
+ 		BPFSyscallDuration: metric.NewHistogramVec(metric.HistogramOpts{
+ 			ConfigName: Namespace + "_" + SubsystemBPF + "_syscall_duration_seconds",
+ 			Disabled:   true,
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemBPF,
+ 			Name:       "syscall_duration_seconds",
+ 			Help:       "Duration of BPF system calls",
+ 		}, []string{LabelOperation, LabelOutcome}),
+
+ 		BPFMapOps: metric.NewCounterVec(metric.CounterOpts{
+ 			ConfigName: Namespace + "_" + SubsystemBPF + "_map_ops_total",
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemBPF,
+ 			Name:       "map_ops_total",
+ 			Help:       "Total operations on map, tagged by map name",
+ 		}, []string{LabelMapName, LabelOperation, LabelOutcome}),
+
+ 		BPFMapCapacity: metric.NewGaugeVec(metric.GaugeOpts{
+ 			ConfigName: Namespace + "_" + SubsystemBPF + "_map_capacity",
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemBPF,
+ 			Name:       "map_capacity",
+ 			Help:       "Capacity of map, tagged by map group. All maps with a capacity of 65536 are grouped under 'default'",
+ 		}, []string{LabelMapGroup}),
+
+ 		TriggerPolicyUpdateTotal: metric.NewCounterVec(metric.CounterOpts{
+ 			ConfigName: Namespace + "_" + SubsystemTriggers + "_policy_update_total",
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemTriggers,
+ 			Name:       "policy_update_total",
+ 			Help:       "Total number of policy update trigger invocations labeled by reason",
+ 		}, []string{"reason"}),
+
+ 		TriggerPolicyUpdateFolds: metric.NewGauge(metric.GaugeOpts{
+ 			ConfigName: Namespace + "_" + SubsystemTriggers + "_policy_update_folds",
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemTriggers,
+ 			Name:       "policy_update_folds",
+ 			Help:       "Current number of folds",
+ 		}),
+
+ 		TriggerPolicyUpdateCallDuration: metric.NewHistogramVec(metric.HistogramOpts{
+ 			ConfigName: Namespace + "_" + SubsystemTriggers + "_policy_update_call_duration_seconds",
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemTriggers,
+ 			Name:       "policy_update_call_duration_seconds",
+ 			Help:       "Duration of policy update trigger",
+ 		}, []string{LabelType}),
+
+ 		VersionMetric: metric.NewGaugeVec(metric.GaugeOpts{
+ 			ConfigName: Namespace + "_version",
+ 			Namespace:  Namespace,
+ 			Name:       "version",
+ 			Help:       "Cilium version",
+ 		}, []string{LabelVersion, LabelVersionRevision, LabelArch}),
+
+ 		APILimiterWaitHistoryDuration: metric.NewHistogramVec(metric.HistogramOpts{
+ 			ConfigName: Namespace + "_" + SubsystemAPILimiter + "_wait_history_duration_seconds",
+ 			Disabled:   true,
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemAPILimiter,
+ 			Name:       "wait_history_duration_seconds",
+ 			Help:       "Histogram over duration of waiting period for API calls subjects to rate limiting",
+ 		}, []string{"api_call"}),
+
+ 		APILimiterWaitDuration: metric.NewGaugeVec(metric.GaugeOpts{
+ 			ConfigName: Namespace + "_" + SubsystemAPILimiter + "_wait_duration_seconds",
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemAPILimiter,
+ 			Name:       "wait_duration_seconds",
+ 			Help:       "Current wait time for api calls",
+ 		}, []string{"api_call", "value"}),
+
+ 		APILimiterProcessingDuration: metric.NewGaugeVec(metric.GaugeOpts{
+ 			ConfigName: Namespace + "_" + SubsystemAPILimiter + "_processing_duration_seconds",
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemAPILimiter,
+ 			Name:       "processing_duration_seconds",
+ 			Help:       "Current processing time of api call",
+ 		}, []string{"api_call", "value"}),
+
+ 		APILimiterRequestsInFlight: metric.NewGaugeVec(metric.GaugeOpts{
+ 			ConfigName: Namespace + "_" + SubsystemAPILimiter + "_requests_in_flight",
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemAPILimiter,
+ 			Name:       "requests_in_flight",
+ 			Help:       "Current requests in flight",
+ 		}, []string{"api_call", "value"}),
+
+ 		APILimiterRateLimit: metric.NewGaugeVec(metric.GaugeOpts{
+ 			ConfigName: Namespace + "_" + SubsystemAPILimiter + "_rate_limit",
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemAPILimiter,
+ 			Name:       "rate_limit",
+ 			Help:       "Current rate limiting configuration",
+ 		}, []string{"api_call", "value"}),
+
+ 		APILimiterAdjustmentFactor: metric.NewGaugeVec(metric.GaugeOpts{
+ 			ConfigName: Namespace + "_" + SubsystemAPILimiter + "_adjustment_factor",
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemAPILimiter,
+ 			Name:       "adjustment_factor",
+ 			Help:       "Current adjustment factor while auto adjusting",
+ 		}, []string{"api_call"}),
+
+ 		APILimiterProcessedRequests: metric.NewCounterVec(metric.CounterOpts{
+ 			ConfigName: Namespace + "_" + SubsystemAPILimiter + "_processed_requests_total",
+ 			Namespace:  Namespace,
+ 			Subsystem:  SubsystemAPILimiter,
+ 			Name:       "processed_requests_total",
+ 			Help:       "Total number of API requests processed",
+ 		}, []string{"api_call", LabelOutcome}),
+
+ 		EndpointPropagationDelay: metric.NewHistogramVec(metric.HistogramOpts{
+ 			ConfigName: Namespace + "_endpoint_propagation_delay_seconds",
+ 			Namespace:  Namespace,
+ 			Name:       "endpoint_propagation_delay_seconds",
+ 			Help:       "CiliumEndpoint roundtrip propagation delay in seconds",
+ 			Buckets:    []float64{.05, .1, 1, 5, 30, 60, 120, 240, 300, 600},
+ 		}, []string{}),
+
+ 		NodeConnectivityStatus: metric.NewGaugeVec(metric.GaugeOpts{
+ 			ConfigName: Namespace + "_node_connectivity_status",
+ 			Namespace:  Namespace,
+ 			Name:       "node_connectivity_status",
+ 			Help:       "The last observed status of both ICMP and HTTP connectivity between the current Cilium agent and other Cilium nodes",
+ 		}, []string{
+ 			LabelSourceCluster,
+ 			LabelSourceNodeName,
+ 			LabelTargetCluster,
+ 			LabelTargetNodeName,
+ 			LabelTargetNodeType,
+ 			LabelType,
+ 		}),
+
+ 		NodeConnectivityLatency: metric.NewGaugeVec(metric.GaugeOpts{
+ 			ConfigName: Namespace + "_node_connectivity_latency_seconds",
+ 			Namespace:  Namespace,
+ 			Name:       "node_connectivity_latency_seconds",
+ 			Help:       "The last observed latency between the current Cilium agent and other Cilium nodes in seconds",
+ 		}, []string{
+ 			LabelSourceCluster,
+ 			LabelSourceNodeName,
+ 			LabelTargetCluster,
+ 			LabelTargetNodeName,
+ 			LabelTargetNodeIP,
+ 			LabelTargetNodeType,
+ 			LabelType,
+ 			LabelProtocol,
+ 			LabelAddressType,
+ 		}),
+
+ 		// Workqueue metrics alias the package-level collectors declared above
+ 		// rather than being constructed here.
+ 		WorkQueueDepth:                   WorkQueueDepth,
+ 		WorkQueueAddsTotal:               WorkQueueAddsTotal,
+ 		WorkQueueLatency:                 WorkQueueLatency,
+ 		WorkQueueDuration:                WorkQueueDuration,
+ 		WorkQueueUnfinishedWork:          WorkQueueUnfinishedWork,
+ 		WorkQueueLongestRunningProcessor: WorkQueueLongestRunningProcessor,
+ 		WorkQueueRetries:                 WorkQueueRetries,
+ 	}
+
+ 	// EndpointMaxIfindex is created separately because its Disabled state
+ 	// depends on runtime configuration (enableIfIndexMetric).
+ 	ifindexOpts := metric.GaugeOpts{
+ 		ConfigName: Namespace + "_endpoint_max_ifindex",
+ 		Disabled:   !enableIfIndexMetric(),
+ 		Namespace:  Namespace,
+ 		Name:       "endpoint_max_ifindex",
+ 		Help:       "Maximum interface index observed for existing endpoints",
+ 	}
+ 	lm.EndpointMaxIfindex = metric.NewGauge(ifindexOpts)
+
+ 	// Seed label values that are known at construction time.
+ 	v := version.GetCiliumVersion()
+ 	lm.VersionMetric.WithLabelValues(v.Version, v.Revision, v.Arch)
+ 	lm.BPFMapCapacity.WithLabelValues("default").Set(DefaultMapCapacity)
+
+ 	// Mirror every field into the package-level variables so legacy callers
+ 	// that reference the globals observe the same metric instances.
+ 	BootstrapTimes = lm.BootstrapTimes
+ 	APIInteractions = lm.APIInteractions
+ 	NodeConnectivityStatus = lm.NodeConnectivityStatus
+ 	NodeConnectivityLatency = lm.NodeConnectivityLatency
+ 	Endpoint = lm.Endpoint
+ 	EndpointMaxIfindex = lm.EndpointMaxIfindex
+ 	EndpointRegenerationTotal = lm.EndpointRegenerationTotal
+ 	EndpointStateCount = lm.EndpointStateCount
+ 	EndpointRegenerationTimeStats = lm.EndpointRegenerationTimeStats
+ 	EndpointPropagationDelay = lm.EndpointPropagationDelay
+ 	Policy = lm.Policy
+ 	PolicyRegenerationCount = lm.PolicyRegenerationCount
+ 	PolicyRegenerationTimeStats = lm.PolicyRegenerationTimeStats
+ 	PolicyRevision = lm.PolicyRevision
+ 	PolicyChangeTotal = lm.PolicyChangeTotal
+ 	PolicyEndpointStatus = lm.PolicyEndpointStatus
+ 	PolicyImplementationDelay = lm.PolicyImplementationDelay
+ 	CIDRGroupsReferenced = lm.CIDRGroupsReferenced
+ 	CIDRGroupTranslationTimeStats = lm.CIDRGroupTranslationTimeStats
+ 	Identity = lm.Identity
+ 	EventTS = lm.EventTS
+ 	EventLagK8s = lm.EventLagK8s
+ 	ProxyRedirects = lm.ProxyRedirects
+ 	ProxyPolicyL7Total = lm.ProxyPolicyL7Total
+ 	ProxyUpstreamTime = lm.ProxyUpstreamTime
+ 	ProxyDatapathUpdateTimeout = lm.ProxyDatapathUpdateTimeout
+ 	DropCount = lm.DropCount
+ 	DropBytes = lm.DropBytes
+ 	ForwardCount = lm.ForwardCount
+ 	ForwardBytes = lm.ForwardBytes
+ 	ConntrackGCRuns = lm.ConntrackGCRuns
+ 	ConntrackGCKeyFallbacks = lm.ConntrackGCKeyFallbacks
+ 	ConntrackGCSize = lm.ConntrackGCSize
+ 	NatGCSize = lm.NatGCSize
+ 	ConntrackGCDuration = lm.ConntrackGCDuration
+ 	ConntrackDumpResets = lm.ConntrackDumpResets
+ 	SignalsHandled = lm.SignalsHandled
+ 	ServicesEventsCount = lm.ServicesEventsCount
+ 	ErrorsWarnings = lm.ErrorsWarnings
+ 	ControllerRuns = lm.ControllerRuns
+ 	ControllerRunsDuration = lm.ControllerRunsDuration
+ 	SubprocessStart = lm.SubprocessStart
+ 	KubernetesEventProcessed = lm.KubernetesEventProcessed
+ 	KubernetesEventReceived = lm.KubernetesEventReceived
+ 	KubernetesAPIInteractions = lm.KubernetesAPIInteractions
+ 	KubernetesAPIRateLimiterLatency = lm.KubernetesAPIRateLimiterLatency
+ 	KubernetesAPICallsTotal = lm.KubernetesAPICallsTotal
+ 	KubernetesCNPStatusCompletion = lm.KubernetesCNPStatusCompletion
+ 	TerminatingEndpointsEvents = lm.TerminatingEndpointsEvents
+ 	IPAMEvent = lm.IPAMEvent
+ 	IPAMCapacity = lm.IPAMCapacity
+ 	KVStoreOperationsDuration = lm.KVStoreOperationsDuration
+ 	KVStoreEventsQueueDuration = lm.KVStoreEventsQueueDuration
+ 	KVStoreQuorumErrors = lm.KVStoreQuorumErrors
+ 	FQDNGarbageCollectorCleanedTotal = lm.FQDNGarbageCollectorCleanedTotal
+ 	FQDNActiveNames = lm.FQDNActiveNames
+ 	FQDNActiveIPs = lm.FQDNActiveIPs
+ 	FQDNAliveZombieConnections = lm.FQDNAliveZombieConnections
+ 	FQDNSemaphoreRejectedTotal = lm.FQDNSemaphoreRejectedTotal
+ 	IPCacheErrorsTotal = lm.IPCacheErrorsTotal
+ 	IPCacheEventsTotal = lm.IPCacheEventsTotal
+ 	BPFSyscallDuration = lm.BPFSyscallDuration
+ 	BPFMapOps = lm.BPFMapOps
+ 	BPFMapCapacity = lm.BPFMapCapacity
+ 	TriggerPolicyUpdateTotal = lm.TriggerPolicyUpdateTotal
+ 	TriggerPolicyUpdateFolds = lm.TriggerPolicyUpdateFolds
+ 	TriggerPolicyUpdateCallDuration = lm.TriggerPolicyUpdateCallDuration
+ 	VersionMetric = lm.VersionMetric
+ 	APILimiterWaitHistoryDuration = lm.APILimiterWaitHistoryDuration
+ 	APILimiterWaitDuration = lm.APILimiterWaitDuration
+ 	APILimiterProcessingDuration = lm.APILimiterProcessingDuration
+ 	APILimiterRequestsInFlight = lm.APILimiterRequestsInFlight
+ 	APILimiterRateLimit = lm.APILimiterRateLimit
+ 	APILimiterAdjustmentFactor = lm.APILimiterAdjustmentFactor
+ 	APILimiterProcessedRequests = lm.APILimiterProcessedRequests
+
+ 	return lm
+ }
+
+// GaugeWithThreshold is a prometheus gauge that registers itself with
+// prometheus if over a threshold value and unregisters when under.
+type GaugeWithThreshold struct {
+ gauge prometheus.Gauge
+ threshold float64
+ active bool
+}
+
+// Set the value of the GaugeWithThreshold.
+func (gwt *GaugeWithThreshold) Set(value float64) {
+ overThreshold := value > gwt.threshold
+ if gwt.active && !overThreshold {
+ gwt.active = !Unregister(gwt.gauge)
+ if gwt.active {
+ logrus.WithField("metric", gwt.gauge.Desc().String()).Warning("Failed to unregister metric")
+ }
+ } else if !gwt.active && overThreshold {
+ err := Register(gwt.gauge)
+ gwt.active = err == nil
+ if err != nil {
+ logrus.WithField("metric", gwt.gauge.Desc().String()).WithError(err).Warning("Failed to register metric")
+ }
+ }
+
+ gwt.gauge.Set(value)
+}
+
+// NewGaugeWithThreshold creates a new GaugeWithThreshold.
+func NewGaugeWithThreshold(name string, subsystem string, desc string, labels map[string]string, threshold float64) *GaugeWithThreshold {
+ return &GaugeWithThreshold{
+ gauge: prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: Namespace,
+ Subsystem: subsystem,
+ Name: name,
+ Help: desc,
+ ConstLabels: labels,
+ }),
+ threshold: threshold,
+ active: false,
+ }
+}
+
+// NewBPFMapPressureGauge creates a new GaugeWithThreshold for the
+// cilium_bpf_map_pressure metric with the map name as constant label.
+func NewBPFMapPressureGauge(mapname string, threshold float64) *GaugeWithThreshold {
+ return NewGaugeWithThreshold(
+ "map_pressure",
+ SubsystemBPF,
+ "Fill percentage of map, tagged by map name",
+ map[string]string{
+ LabelMapName: mapname,
+ },
+ threshold,
+ )
+}
+
+func Reinitialize() {
+ reg, err := registry.Await(context.Background())
+ if err == nil {
+ reg.Reinitialize()
+ }
+}
+
+// Register registers a collector
+func Register(c prometheus.Collector) error {
+ withRegistry(func(reg *Registry) {
+ reg.Register(c)
+ })
+
+ return nil
+}
+
+// RegisterList registers a list of collectors. If registration of one
+// collector fails, no collector is registered.
+func RegisterList(list []prometheus.Collector) error {
+ withRegistry(func(reg *Registry) {
+ reg.RegisterList(list)
+ })
+
+ return nil
+}
+
+// Unregister unregisters a collector
+func Unregister(c prometheus.Collector) bool {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ defer cancel()
+ reg, err := registry.Await(ctx)
+ if err == nil {
+ return reg.Unregister(c)
+ }
+
+ return false
+}
+
+// DumpMetrics gets the current Cilium metrics and dumps all into a
+// models.Metrics structure.If metrics cannot be retrieved, returns an error
+func DumpMetrics() ([]*models.Metric, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ defer cancel()
+ reg, err := registry.Await(ctx)
+ if err == nil {
+ return reg.DumpMetrics()
+ }
+
+ return nil, nil
+}
+
+// withRegistry waits up to 1 second for the registry promise to resolve, if it does not then
+// we might be calling this function before hive has been started, so to avoid a deadlock,
+// wait in a routine so actions are deferred until the registry is initialized.
+func withRegistry(fn func(reg *Registry)) {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ reg, err := registry.Await(ctx)
+ if err == nil {
+ fn(reg)
+ cancel()
+ return
+ }
+ cancel()
+
+ go func() {
+ reg, err := registry.Await(context.Background())
+ if err == nil {
+ fn(reg)
+ }
+ }()
+}
+
+// GetCounterValue returns the current value
+// stored for the counter
+func GetCounterValue(m prometheus.Counter) float64 {
+ var pm dto.Metric
+ err := m.Write(&pm)
+ if err == nil && pm.Counter != nil && pm.Counter.Value != nil {
+ return *pm.Counter.Value
+ }
+ return 0
+}
+
+// GetGaugeValue returns the current value stored for the gauge. This function
+// is useful in tests.
+func GetGaugeValue(m prometheus.Gauge) float64 {
+ var pm dto.Metric
+ err := m.Write(&pm)
+ if err == nil && pm.Gauge != nil && pm.Gauge.Value != nil {
+ return *pm.Gauge.Value
+ }
+ return 0
+}
+
+// Error2Outcome converts an error to LabelOutcome
+func Error2Outcome(err error) string {
+ if err != nil {
+ return LabelValueOutcomeFail
+ }
+
+ return LabelValueOutcomeSuccess
+}
+
+func BoolToFloat64(v bool) float64 {
+ if v {
+ return 1
+ }
+ return 0
+}
+
+// In general, most bpf maps are allocated to occupy a 16-bit key size.
+// To reduce the number of metrics that need to be emitted for map capacity,
+// we assume a default map size of 2^16 entries for all maps, which can be
+// assumed unless specified otherwise.
+const DefaultMapCapacity = 65536
+
+func UpdateMapCapacity(groupName string, capacity uint32) {
+ if capacity == 0 || capacity == DefaultMapCapacity {
+ return
+ }
+ BPFMapCapacity.WithLabelValues(groupName).Set(float64(capacity))
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/metrics_unix.go b/vendor/github.com/cilium/cilium/pkg/metrics/metrics_unix.go
new file mode 100644
index 000000000..184d3baa7
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/metrics/metrics_unix.go
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+//go:build !windows
+
+package metrics
+
+import (
+ "golang.org/x/sys/unix"
+
+ "github.com/cilium/cilium/pkg/datapath/linux/probes"
+)
+
+// Errno2Outcome converts a unix.Errno to LabelOutcome
+func Errno2Outcome(errno unix.Errno) string {
+ if errno != 0 {
+ return LabelValueOutcomeFail
+ }
+
+ return LabelValueOutcomeSuccess
+}
+
+func enableIfIndexMetric() bool {
+ // On kernels which do not provide ifindex via the FIB, Cilium needs
+ // to store it in the CT map, with a field limit of max(uint16).
+ // The EndpointMaxIfindex metric can be used to determine if that
+ // limit is approaching. However, it should only be enabled by
+ // default if we observe that the FIB is not providing the ifindex.
+ return probes.HaveFibIfindex() != nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/metrics_windows.go b/vendor/github.com/cilium/cilium/pkg/metrics/metrics_windows.go
new file mode 100644
index 000000000..dc4333ab3
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/metrics/metrics_windows.go
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package metrics
+
+func enableIfIndexMetric() bool {
+ return false
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/middleware.go b/vendor/github.com/cilium/cilium/pkg/metrics/middleware.go
new file mode 100644
index 000000000..be13472a1
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/metrics/middleware.go
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package metrics
+
+import (
+ "net/http"
+ "strconv"
+ "strings"
+
+ "github.com/cilium/cilium/pkg/metrics/metric"
+ "github.com/cilium/cilium/pkg/spanstat"
+)
+
+// APIEventTSHelper is intended to be a global middleware to track metrics
+// around API calls.
+// It records the timestamp of an API call in the provided gauge.
+type APIEventTSHelper struct {
+ Next http.Handler
+ TSGauge metric.Vec[metric.Gauge]
+ Histogram metric.Vec[metric.Observer]
+}
+
+type ResponderWrapper struct {
+ http.ResponseWriter
+ code int
+}
+
+func (rw *ResponderWrapper) WriteHeader(code int) {
+ rw.code = code
+ rw.ResponseWriter.WriteHeader(code)
+}
+
+// getShortPath returns the API path trimmed after the 3rd slash.
+// examples:
+//
+// "/v1/config" -> "/v1/config"
+// "/v1/endpoint/cilium-local:0" -> "/v1/endpoint"
+// "/v1/endpoint/container-id:597.." -> "/v1/endpoint"
+func getShortPath(s string) string {
+ var idxSum int
+ for nThSlash := 0; nThSlash < 3; nThSlash++ {
+ idx := strings.IndexByte(s[idxSum:], '/')
+ if idx == -1 {
+ return s
+ }
+ idxSum += idx + 1
+ }
+ return s[:idxSum-1]
+}
+
+// ServeHTTP implements the http.Handler interface. It records the timestamp
+// this API call began at, then chains to the next handler.
+func (m *APIEventTSHelper) ServeHTTP(r http.ResponseWriter, req *http.Request) {
+ reqOk := req != nil && req.URL != nil && req.URL.Path != ""
+ var path string
+ if reqOk {
+ path = getShortPath(req.URL.Path)
+ m.TSGauge.WithLabelValues(LabelEventSourceAPI, path, req.Method).SetToCurrentTime()
+ }
+ duration := spanstat.Start()
+ rw := &ResponderWrapper{ResponseWriter: r}
+ m.Next.ServeHTTP(rw, req)
+ if reqOk {
+ took := float64(duration.End(true).Total().Seconds())
+ m.Histogram.WithLabelValues(path, req.Method, strconv.Itoa(rw.code)).Observe(took)
+ }
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/registry.go b/vendor/github.com/cilium/cilium/pkg/metrics/registry.go
new file mode 100644
index 000000000..a5a4af4ba
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/metrics/registry.go
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package metrics
+
+import (
+ "errors"
+ "net/http"
+ "regexp"
+ "strings"
+
+ "github.com/cilium/cilium/api/v1/models"
+ "github.com/cilium/cilium/pkg/hive"
+ "github.com/cilium/cilium/pkg/hive/cell"
+ metricpkg "github.com/cilium/cilium/pkg/metrics/metric"
+ "github.com/cilium/cilium/pkg/option"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/collectors"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+ dto "github.com/prometheus/client_model/go"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/pflag"
+)
+
+var defaultRegistryConfig = RegistryConfig{
+ PrometheusServeAddr: ":9962",
+}
+
+type RegistryConfig struct {
+ // PrometheusServeAddr IP:Port on which to serve prometheus metrics (pass ":Port" to bind on all interfaces, "" is off)
+ PrometheusServeAddr string
+ // This is a list of metrics to be enabled or disabled, format is `+`/`-` + `{metric name}`
+ Metrics []string
+}
+
+func (rc RegistryConfig) Flags(flags *pflag.FlagSet) {
+ flags.String("prometheus-serve-addr", rc.PrometheusServeAddr, "IP:Port on which to serve prometheus metrics (pass \":Port\" to bind on all interfaces, \"\" is off)")
+ flags.StringSlice("metrics", rc.Metrics, "Metrics that should be enabled or disabled from the default metric list. (+metric_foo to enable metric_foo, -metric_bar to disable metric_bar)")
+}
+
+// RegistryParams are the parameters needed to construct a Registry
+type RegistryParams struct {
+ cell.In
+
+ Logger logrus.FieldLogger
+ Shutdowner hive.Shutdowner
+ Lifecycle hive.Lifecycle
+
+ AutoMetrics []metricpkg.WithMetadata `group:"hive-metrics"`
+ Config RegistryConfig
+
+ DaemonConfig *option.DaemonConfig
+}
+
+// Registry is a cell around a prometheus registry. This registry starts an HTTP server as part of its lifecycle
+// on which all enabled metrics will be available. A reference to this registry can also be used to dynamically
+// register or unregister `prometheus.Collector`s.
+type Registry struct {
+ inner *prometheus.Registry
+
+ params RegistryParams
+}
+
+func NewRegistry(params RegistryParams) *Registry {
+ reg := &Registry{
+ params: params,
+ }
+
+ reg.Reinitialize()
+
+ // Resolve the global registry variable for as long as we still have global functions
+ registryResolver.Resolve(reg)
+
+ if params.Config.PrometheusServeAddr != "" {
+ // The Handler function provides a default handler to expose metrics
+ // via an HTTP server. "/metrics" is the usual endpoint for that.
+ mux := http.NewServeMux()
+ mux.Handle("/metrics", promhttp.HandlerFor(reg.inner, promhttp.HandlerOpts{}))
+ srv := http.Server{
+ Addr: params.Config.PrometheusServeAddr,
+ Handler: mux,
+ }
+
+ params.Lifecycle.Append(hive.Hook{
+ OnStart: func(hc hive.HookContext) error {
+ go func() {
+ params.Logger.Infof("Serving prometheus metrics on %s", params.Config.PrometheusServeAddr)
+ err := srv.ListenAndServe()
+ if err != nil && !errors.Is(err, http.ErrServerClosed) {
+ params.Shutdowner.Shutdown(hive.ShutdownWithError(err))
+ }
+ }()
+ return nil
+ },
+ OnStop: func(hc hive.HookContext) error {
+ return srv.Shutdown(hc)
+ },
+ })
+ }
+
+ return reg
+}
+
+// Register registers a collector
+func (r *Registry) Register(c prometheus.Collector) error {
+ return r.inner.Register(c)
+}
+
+// Unregister unregisters a collector
+func (r *Registry) Unregister(c prometheus.Collector) bool {
+ return r.inner.Unregister(c)
+}
+
+// goCustomCollectorsRX tracks enabled go runtime metrics.
+var goCustomCollectorsRX = regexp.MustCompile(`^/sched/latencies:seconds`)
+
+// Reinitialize creates a new internal registry and re-registers metrics to it.
+func (r *Registry) Reinitialize() {
+ r.inner = prometheus.NewPedanticRegistry()
+
+ // Default metrics which can't be disabled.
+ r.MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{Namespace: Namespace}))
+ r.MustRegister(collectors.NewGoCollector(
+ collectors.WithGoCollectorRuntimeMetrics(
+ collectors.GoRuntimeMetricsRule{Matcher: goCustomCollectorsRX},
+ )))
+ r.MustRegister(newStatusCollector())
+ r.MustRegister(newbpfCollector())
+
+ metrics := make(map[string]metricpkg.WithMetadata)
+ for i, autoMetric := range r.params.AutoMetrics {
+ metrics[autoMetric.Opts().GetConfigName()] = r.params.AutoMetrics[i]
+ }
+
+ // This is a bodge for a very specific feature, inherited from the old `Daemon.additionalMetrics`.
+ // We should really find a more generic way to handle such cases.
+ metricFlags := r.params.Config.Metrics
+ if r.params.DaemonConfig.DNSProxyConcurrencyLimit > 0 {
+ metricFlags = append(metricFlags, "+"+Namespace+"_"+SubsystemFQDN+"_semaphore_rejected_total")
+ }
+
+ for _, metricFlag := range metricFlags {
+ metricFlag = strings.TrimSpace(metricFlag)
+
+ // This is a temporary hack which allows us to get rid of the centralized metric config without refactoring the
+ // dynamic map pressure registration/unregistion mechanism.
+ // Long term the map pressure metric becomes a smarter component so this is no longer needed.
+ if metricFlag[1:] == "-"+Namespace+"_"+SubsystemBPF+"_map_pressure" {
+ BPFMapPressure = false
+ continue
+ }
+
+ metric := metrics[metricFlag[1:]]
+ if metric == nil {
+ continue
+ }
+
+ switch metricFlag[0] {
+ case '+':
+ metric.SetEnabled(true)
+ case '-':
+ metric.SetEnabled(false)
+ default:
+ r.params.Logger.Warning(
+ "--metrics flag contains value which does not start with + or -, '%s', ignoring",
+ metricFlag,
+ )
+ }
+ }
+
+ for _, m := range metrics {
+ if c, ok := m.(prometheus.Collector); ok {
+ r.MustRegister(c)
+ }
+ }
+}
+
+// MustRegister adds the collector to the registry, exposing this metric to
+// prometheus scrapes.
+// It will panic on error.
+func (r *Registry) MustRegister(c ...prometheus.Collector) {
+ r.inner.MustRegister(c...)
+}
+
+// RegisterList registers a list of collectors. If registration of one
+// collector fails, no collector is registered.
+func (r *Registry) RegisterList(list []prometheus.Collector) error {
+ registered := []prometheus.Collector{}
+
+ for _, c := range list {
+ if err := r.Register(c); err != nil {
+ for _, c := range registered {
+ r.Unregister(c)
+ }
+ return err
+ }
+
+ registered = append(registered, c)
+ }
+
+ return nil
+}
+
+// DumpMetrics gets the current Cilium metrics and dumps all into a
+// models.Metrics structure.If metrics cannot be retrieved, returns an error
+func (r *Registry) DumpMetrics() ([]*models.Metric, error) {
+ result := []*models.Metric{}
+ currentMetrics, err := r.inner.Gather()
+ if err != nil {
+ return result, err
+ }
+
+ for _, val := range currentMetrics {
+ metricName := val.GetName()
+ metricType := val.GetType()
+
+ for _, metricLabel := range val.Metric {
+ labels := map[string]string{}
+ for _, label := range metricLabel.GetLabel() {
+ labels[label.GetName()] = label.GetValue()
+ }
+
+ var value float64
+ switch metricType {
+ case dto.MetricType_COUNTER:
+ value = metricLabel.Counter.GetValue()
+ case dto.MetricType_GAUGE:
+ value = metricLabel.GetGauge().GetValue()
+ case dto.MetricType_UNTYPED:
+ value = metricLabel.GetUntyped().GetValue()
+ case dto.MetricType_SUMMARY:
+ value = metricLabel.GetSummary().GetSampleSum()
+ case dto.MetricType_HISTOGRAM:
+ value = metricLabel.GetHistogram().GetSampleSum()
+ default:
+ continue
+ }
+
+ metric := &models.Metric{
+ Name: metricName,
+ Labels: labels,
+ Value: value,
+ }
+ result = append(result, metric)
+ }
+ }
+ return result, nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/status.go b/vendor/github.com/cilium/cilium/pkg/metrics/status.go
new file mode 100644
index 000000000..98c4c9d9e
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/metrics/status.go
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package metrics
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/sirupsen/logrus"
+
+ clientPkg "github.com/cilium/cilium/pkg/client"
+ healthClientPkg "github.com/cilium/cilium/pkg/health/client"
+)
+
+type statusCollector struct {
+ daemonHealthGetter daemonHealthGetter
+ connectivityStatusGetter connectivityStatusGetter
+
+ controllersFailingDesc *prometheus.Desc
+ ipAddressesDesc *prometheus.Desc
+ unreachableNodesDesc *prometheus.Desc
+ unreachableHealthEndpointsDesc *prometheus.Desc
+}
+
+func newStatusCollector() *statusCollector {
+ ciliumClient, err := clientPkg.NewClient("")
+ if err != nil {
+ logrus.WithError(err).Fatal("Error while creating Cilium API client")
+ }
+
+ healthClient, err := healthClientPkg.NewClient("")
+ if err != nil {
+ logrus.WithError(err).Fatal("Error while creating cilium-health API client")
+ }
+
+ return newStatusCollectorWithClients(ciliumClient.Daemon, healthClient.Connectivity)
+}
+
+// newStatusCollectorWithClients provides a constructor with injected clients
+func newStatusCollectorWithClients(d daemonHealthGetter, c connectivityStatusGetter) *statusCollector {
+ return &statusCollector{
+ daemonHealthGetter: d,
+ connectivityStatusGetter: c,
+ controllersFailingDesc: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, "", "controllers_failing"),
+ "Number of failing controllers",
+ nil, nil,
+ ),
+ ipAddressesDesc: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, "", "ip_addresses"),
+ "Number of allocated IP addresses",
+ []string{"family"}, nil,
+ ),
+ unreachableNodesDesc: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, "", "unreachable_nodes"),
+ "Number of nodes that cannot be reached",
+ nil, nil,
+ ),
+ unreachableHealthEndpointsDesc: prometheus.NewDesc(
+ prometheus.BuildFQName(Namespace, "", "unreachable_health_endpoints"),
+ "Number of health endpoints that cannot be reached",
+ nil, nil,
+ ),
+ }
+}
+
+func (s *statusCollector) Describe(ch chan<- *prometheus.Desc) {
+ ch <- s.controllersFailingDesc
+ ch <- s.ipAddressesDesc
+ ch <- s.unreachableNodesDesc
+ ch <- s.unreachableHealthEndpointsDesc
+}
+
+func (s *statusCollector) Collect(ch chan<- prometheus.Metric) {
+ statusResponse, err := s.daemonHealthGetter.GetHealthz(nil)
+ if err != nil {
+ logrus.WithError(err).Error("Error while getting Cilium status")
+ return
+ }
+
+ if statusResponse.Payload == nil {
+ return
+ }
+
+ // Controllers failing
+ controllersFailing := 0
+
+ for _, ctrl := range statusResponse.Payload.Controllers {
+ if ctrl.Status == nil {
+ continue
+ }
+ if ctrl.Status.ConsecutiveFailureCount > 0 {
+ controllersFailing++
+ }
+ }
+
+ ch <- prometheus.MustNewConstMetric(
+ s.controllersFailingDesc,
+ prometheus.GaugeValue,
+ float64(controllersFailing),
+ )
+
+ if statusResponse.Payload.Ipam != nil {
+ // Address count
+ ch <- prometheus.MustNewConstMetric(
+ s.ipAddressesDesc,
+ prometheus.GaugeValue,
+ float64(len(statusResponse.Payload.Ipam.IPV4)),
+ "ipv4",
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ s.ipAddressesDesc,
+ prometheus.GaugeValue,
+ float64(len(statusResponse.Payload.Ipam.IPV6)),
+ "ipv6",
+ )
+ }
+
+ healthStatusResponse, err := s.connectivityStatusGetter.GetStatus(nil)
+ if err != nil {
+ logrus.WithError(err).Error("Error while getting cilium-health status")
+ return
+ }
+
+ if healthStatusResponse.Payload == nil {
+ return
+ }
+
+ // Nodes and endpoints healthStatusResponse
+ var (
+ unreachableNodes int
+ unreachableEndpoints int
+ )
+
+ for _, nodeStatus := range healthStatusResponse.Payload.Nodes {
+ for _, addr := range healthClientPkg.GetAllHostAddresses(nodeStatus) {
+ if healthClientPkg.GetPathConnectivityStatusType(addr) == healthClientPkg.ConnStatusUnreachable {
+ unreachableNodes++
+ break
+ }
+ }
+
+ for _, addr := range healthClientPkg.GetAllEndpointAddresses(nodeStatus) {
+ if healthClientPkg.GetPathConnectivityStatusType(addr) == healthClientPkg.ConnStatusUnreachable {
+ unreachableEndpoints++
+ break
+ }
+ }
+ }
+
+ ch <- prometheus.MustNewConstMetric(
+ s.unreachableNodesDesc,
+ prometheus.GaugeValue,
+ float64(unreachableNodes),
+ )
+
+ ch <- prometheus.MustNewConstMetric(
+ s.unreachableHealthEndpointsDesc,
+ prometheus.GaugeValue,
+ float64(unreachableEndpoints),
+ )
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/monitor/api/drop.go b/vendor/github.com/cilium/cilium/pkg/monitor/api/drop.go
index 5c64bf94d..8c2371eb9 100644
--- a/vendor/github.com/cilium/cilium/pkg/monitor/api/drop.go
+++ b/vendor/github.com/cilium/cilium/pkg/monitor/api/drop.go
@@ -90,6 +90,9 @@ var errors = map[uint8]string{
192: "Invalid ClusterID",
193: "Unsupported packet protocol for DSR encapsulation",
194: "No egress gateway found",
+ 195: "Traffic is unencrypted",
+ 196: "TTL exceeded",
+ 197: "No node ID found",
}
func extendedReason(reason uint8, extError int8) string {
diff --git a/vendor/github.com/cilium/cilium/pkg/mtu/detect.go b/vendor/github.com/cilium/cilium/pkg/mtu/detect.go
new file mode 100644
index 000000000..f39b65de1
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/mtu/detect.go
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package mtu
+
+import (
+ "github.com/cilium/cilium/pkg/logging"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+)
+
+var log = logging.DefaultLogger.WithField(logfields.LogSubsys, "mtu")
diff --git a/vendor/github.com/cilium/cilium/pkg/mtu/detect_linux.go b/vendor/github.com/cilium/cilium/pkg/mtu/detect_linux.go
new file mode 100644
index 000000000..baff35562
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/mtu/detect_linux.go
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+//go:build linux
+
+package mtu
+
+import (
+ "fmt"
+ "net"
+
+ "github.com/sirupsen/logrus"
+ "github.com/vishvananda/netlink"
+
+ "github.com/cilium/cilium/pkg/logging/logfields"
+)
+
+const (
+ // externalProbeIPv4 is an IPv4 address specifically designed for tests. We
+ // only want to retrieve default route for external IP addresses, thus it
+ // doesn't need to be a real address.
+ externalProbeIPv4 = "203.0.113.1"
+
+ // externalProbeIPv6 is an IPv4 address specifically designed for tests. We
+ // only want to retrieve default route for external IP addresses, thus it
+ // doesn't need to be a real address.
+ externalProbeIPv6 = "2001:db8::1"
+)
+
+func getRoute(externalProbe string) ([]netlink.Route, error) {
+ ip := net.ParseIP(externalProbe)
+ if ip == nil {
+ return nil, fmt.Errorf("unable to parse IP %s", externalProbe)
+ }
+
+ routes, err := netlink.RouteGet(ip)
+ if err != nil {
+ return nil, fmt.Errorf("unable to lookup route to %s: %w", externalProbe, err)
+ }
+
+ if len(routes) == 0 {
+ return nil, fmt.Errorf("no route to %s", externalProbe)
+ }
+
+ return routes, nil
+}
+
+func autoDetect() (int, error) {
+ var routes []netlink.Route
+ var err error
+
+ routes, err = getRoute(externalProbeIPv4)
+ if err != nil {
+ prevErr := err
+ routes, err = getRoute(externalProbeIPv6)
+ if err != nil {
+ return 0, fmt.Errorf("%v, %v", err.Error(), prevErr.Error())
+ }
+ }
+
+ if routes[0].Gw == nil {
+ return 0, fmt.Errorf("unable to find default gateway from the routes: %s", routes)
+ }
+
+ link, err := netlink.LinkByIndex(routes[0].LinkIndex)
+ if err != nil {
+ return 0, fmt.Errorf("unable to find interface of default route: %w", err)
+ }
+
+ if mtu := link.Attrs().MTU; mtu != 0 {
+ log.Infof("Detected MTU %d", mtu)
+ return mtu, nil
+ }
+
+ return EthernetMTU, nil
+}
+
+// getMTUFromIf finds the interface that holds the ip and returns its mtu
+func getMTUFromIf(ip net.IP) (int, error) {
+ ifaces, err := netlink.LinkList()
+ if err != nil {
+ return 0, fmt.Errorf("unable to list interfaces: %w", err)
+ }
+
+ for _, iface := range ifaces {
+ addrs, err := netlink.AddrList(iface, netlink.FAMILY_ALL)
+ if err != nil {
+ log.WithFields(logrus.Fields{
+ logfields.Device: iface.Attrs().Name,
+ }).Warning("Unable to list all addresses")
+ continue
+ }
+
+ for _, addr := range addrs {
+ if addr.IPNet.IP.Equal(ip) {
+ myMTU := iface.Attrs().MTU
+ log.WithFields(logrus.Fields{
+ logfields.Device: iface.Attrs().Name,
+ logfields.IPAddr: ip,
+ logfields.MTU: myMTU,
+ }).Info("Inheriting MTU from external network interface")
+ return myMTU, nil
+ }
+ }
+ }
+ return 0, fmt.Errorf("No interface contains the provided ip: %v", ip)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/mtu/detect_other.go b/vendor/github.com/cilium/cilium/pkg/mtu/detect_other.go
new file mode 100644
index 000000000..74080821b
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/mtu/detect_other.go
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+//go:build !linux
+
+package mtu
+
+import "net"
+
+func autoDetect() (int, error) {
+ return EthernetMTU, nil
+}
+
+func getMTUFromIf(net.IP) (int, error) {
+ return EthernetMTU, nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/mtu/doc.go b/vendor/github.com/cilium/cilium/pkg/mtu/doc.go
new file mode 100644
index 000000000..769179981
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/mtu/doc.go
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Package mtu is a library for tracking and configuring MTU for devices and
+// routes.
+package mtu
diff --git a/vendor/github.com/cilium/cilium/pkg/mtu/mtu.go b/vendor/github.com/cilium/cilium/pkg/mtu/mtu.go
new file mode 100644
index 000000000..a36050418
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/mtu/mtu.go
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package mtu
+
+import (
+ "net"
+)
+
+const (
+ // MaxMTU is the highest MTU that can be used for devices and routes
+ // handled by Cilium. It will typically be used to configure inbound
+ // paths towards containers where it is guaranteed that the packet will
+ // not be rerouted to another node, and therefore will not lead to
+ // any form of IP fragmentation.
+ // One might expect this to be 65535, however Linux seems to cap the
+ // MTU of routes at 65520, so we use this value below.
+ MaxMTU = 65520
+
+ // EthernetMTU is the standard MTU for Ethernet devices. It is used
+ // as the MTU for container devices when running direct routing mode.
+ EthernetMTU = 1500
+
+ // TunnelOverhead is an approximation for bytes used for tunnel
+ // encapsulation. It accounts for:
+ // (Outer ethernet is not accounted against MTU size)
+ // Outer IPv4 header: 20B
+ // Outer UDP header: 8B
+ // Outer VXLAN header: 8B
+ // Original Ethernet: 14B
+ // ---
+ // Total extra bytes: 50B
+ TunnelOverhead = 50
+
+ // DsrTunnelOverhead is about the GENEVE DSR option that gets inserted
+ // by the LB, when addressing a Service in hs-ipcache mode
+ DsrTunnelOverhead = 12
+
+ // EncryptionIPsecOverhead is an approximation for bytes used for
+ // encryption. Depending on key size and encryption type the actual
+ // size may vary here we do calculations for 128B keys and Auth. The
+ // overhead is accounted for as:
+ // Outer IP header: 20B
+ // SPI: 4B
+ // Sequece Numbers: 4B
+ // Next Header: 1B
+ // ICV: 16B
+ // Padding: 16B
+ // 128bit Auth: 16B
+ // ---
+ // Total extra bytes: 77B
+ EncryptionIPsecOverhead = 77
+
+ // EncryptionDefaultAuthKeyLength is 16 representing 128B key recommended
+ // size for GCM(AES*) in RFC4106. Users may input other lengths via
+ // key secrets.
+ EncryptionDefaultAuthKeyLength = 16
+
+ // WireguardOverhead is an approximation for the overhead of WireGuard
+ // encapsulation.
+ //
+ // https://github.com/torvalds/linux/blob/v5.12/drivers/net/wireguard/device.c#L262:
+ // MESSAGE_MINIMUM_LENGTH: 32B
+ // Outer IPv4 or IPv6 header: 40B
+ // Outer UDP header: 8B
+ // ---
+ // Total extra bytes: 80B
+ WireguardOverhead = 80
+)
+
+// Configuration is an MTU configuration as returned by NewConfiguration
+type Configuration struct {
+ // standardMTU is the regular MTU used for configuring devices and
+ // routes where packets are expected to be delivered outside the node.
+ //
+ // Note that this is a singleton for the process including this
+ // package. This means, for instance, that when using this from the
+ // ``pkg/plugins/*`` sources, it will not respect the settings
+ // configured inside the ``daemon/``.
+ standardMTU int
+
+ // tunnelMTU is the MTU used for configuring a tunnel mesh for
+ // inter-node connectivity.
+ //
+ // Similar to StandardMTU, this is a singleton for the process.
+ tunnelMTU int
+
+ // preEncrypMTU is the MTU used for configurations of a encryption route.
+ // If tunneling is enabled the tunnelMTU is used which will include
+ // additional encryption overhead if needed.
+ preEncryptMTU int
+
+ // postEncryptMTU is the MTU used for configurations of a encryption
+ // route _after_ encryption tags have been addded. These will be used
+ // in the encryption routing table. The MTU accounts for the tunnel
+ // overhead, if any, but assumes packets are already encrypted.
+ postEncryptMTU int
+
+ encapEnabled bool
+ encryptEnabled bool
+ wireguardEnabled bool
+}
+
+// NewConfiguration returns a new MTU configuration. The MTU can be manually
+// specified, otherwise it will be automatically detected. if encapEnabled is
+// true, the MTU is adjusted to account for encapsulation overhead for all
+// routes involved in node to node communication.
+func NewConfiguration(authKeySize int, encryptEnabled bool, encapEnabled bool, wireguardEnabled bool, hsIpcacheDSRenabled bool, mtu int, mtuDetectIP net.IP) Configuration {
+ encryptOverhead := 0
+
+ if mtu == 0 {
+ var err error
+
+ if mtuDetectIP != nil {
+ mtu, err = getMTUFromIf(mtuDetectIP)
+ } else {
+ mtu, err = autoDetect()
+ }
+ if err != nil {
+ log.WithError(err).Warning("Unable to automatically detect MTU")
+ mtu = EthernetMTU
+ }
+ }
+
+ if encryptEnabled {
+ // Add the difference between the default and the actual key sizes here
+ // to account for users specifying non-default auth key lengths.
+ encryptOverhead = EncryptionIPsecOverhead + (authKeySize - EncryptionDefaultAuthKeyLength)
+ }
+
+ fullTunnelOverhead := TunnelOverhead
+ if hsIpcacheDSRenabled {
+ fullTunnelOverhead += DsrTunnelOverhead
+ }
+
+ conf := Configuration{
+ standardMTU: mtu,
+ tunnelMTU: mtu - (fullTunnelOverhead + encryptOverhead),
+ postEncryptMTU: mtu - TunnelOverhead,
+ preEncryptMTU: mtu - encryptOverhead,
+ encapEnabled: encapEnabled,
+ encryptEnabled: encryptEnabled,
+ wireguardEnabled: wireguardEnabled,
+ }
+
+ if conf.tunnelMTU < 0 {
+ conf.tunnelMTU = 0
+ }
+
+ return conf
+}
+
+// GetRoutePostEncryptMTU return the MTU to be used on the encryption routing
+// table. This is the MTU without encryption overhead and in the tunnel
+// case accounts for the tunnel overhead.
+func (c *Configuration) GetRoutePostEncryptMTU() int {
+ if c.encapEnabled {
+ if c.postEncryptMTU == 0 {
+ return EthernetMTU - TunnelOverhead
+ }
+ return c.postEncryptMTU
+
+ }
+ return c.GetDeviceMTU()
+}
+
+// GetRouteMTU returns the MTU to be used on the network. When running in
+// tunneling mode and/or with encryption enabled, this will have tunnel and
+// encryption overhead accounted for.
+func (c *Configuration) GetRouteMTU() int {
+ if c.wireguardEnabled {
+ return c.GetDeviceMTU() - WireguardOverhead
+ }
+
+ if !c.encapEnabled && !c.encryptEnabled {
+ return c.GetDeviceMTU()
+ }
+
+ if c.encryptEnabled && !c.encapEnabled {
+ if c.preEncryptMTU == 0 {
+ return EthernetMTU - EncryptionIPsecOverhead
+ }
+ return c.preEncryptMTU
+ }
+
+ if c.tunnelMTU == 0 {
+ if c.encryptEnabled {
+ return EthernetMTU - (TunnelOverhead + EncryptionIPsecOverhead)
+ }
+ return EthernetMTU - TunnelOverhead
+ }
+
+ return c.tunnelMTU
+}
+
+// GetDeviceMTU returns the MTU to be used on workload facing devices.
+func (c *Configuration) GetDeviceMTU() int {
+ if c.standardMTU == 0 {
+ return EthernetMTU
+ }
+
+ return c.standardMTU
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/node/address.go b/vendor/github.com/cilium/cilium/pkg/node/address.go
new file mode 100644
index 000000000..1fed8bba5
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/node/address.go
@@ -0,0 +1,853 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package node
+
+import (
+ "bufio"
+ "context"
+ "errors"
+ "fmt"
+ "net"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/sirupsen/logrus"
+
+ "github.com/cilium/cilium/api/v1/models"
+ "github.com/cilium/cilium/pkg/byteorder"
+ "github.com/cilium/cilium/pkg/cidr"
+ "github.com/cilium/cilium/pkg/common"
+ "github.com/cilium/cilium/pkg/defaults"
+ "github.com/cilium/cilium/pkg/lock"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+ "github.com/cilium/cilium/pkg/mac"
+ "github.com/cilium/cilium/pkg/option"
+ wgTypes "github.com/cilium/cilium/pkg/wireguard/types"
+)
+
+// preferPublicIP, when passed to firstGlobalV4Addr/firstGlobalV6Addr,
+// requests that a public address be preferred over a private one.
+const preferPublicIP bool = true
+
+var (
+ // addrsMu protects addrs. Outside the addresses struct
+ // so that we can Uninitialize() without linter complaining
+ // about lock copying.
+ addrsMu lock.RWMutex
+ addrs addresses
+
+ // localNode holds the current state of the local "types.Node".
+ // This is defined here until all uses of the getters and
+ // setters in this file have been migrated to use LocalNodeStore
+ // directly.
+ // Initialized to proper instance via an invoke function in LocalNodeStoreCell,
+ // or temporarily in tests with 'WithTestLocalNodeStore'.
+ localNode *LocalNodeStore
+)
+
+// getLocalNode returns a snapshot of the local node state. Calling it after
+// the LocalNodeStore has stopped is fatal.
+func getLocalNode() LocalNode {
+ n, err := localNode.Get(context.TODO())
+ if err != nil {
+ // Only expecting errors if we're called after LocalNodeStore has stopped, e.g.
+ // we have a component that uses the legacy getters and setters here and does
+ // not depend on LocalNodeStore.
+ log.WithError(err).Fatal("getLocalNode: unexpected error")
+ }
+ return n
+}
+
+// addresses holds the loopback address, the per-interface NodePort and BPF
+// masquerade addresses, and the router info. All access goes through addrsMu.
+type addresses struct {
+ ipv4Loopback net.IP
+ ipv4NodePortAddrs map[string]net.IP // iface name => ip addr
+ ipv4MasqAddrs map[string]net.IP // iface name => ip addr
+ ipv6NodePortAddrs map[string]net.IP // iface name => ip addr
+ ipv6MasqAddrs map[string]net.IP // iface name => ip addr
+ routerInfo RouterInfo
+}
+
+// RouterInfo describes additional information about the router device
+// (cilium_host); see GetRouterInfo and SetRouterInfo.
+type RouterInfo interface {
+ GetIPv4CIDRs() []net.IPNet
+ GetMac() mac.MAC
+ GetInterfaceNumber() int
+}
+
+// makeIPv6HostIP returns the fixed unique-local fallback IPv6 address
+// (fc00::10CA:1) used when no global IPv6 address could be derived.
+func makeIPv6HostIP() net.IP {
+ ipstr := "fc00::10CA:1"
+ ip := net.ParseIP(ipstr)
+ if ip == nil {
+ log.WithField(logfields.IPAddr, ipstr).Fatal("Unable to parse IP")
+ }
+
+ return ip
+}
+
+// InitDefaultPrefix initializes the node address and allocation prefixes with
+// default values derived from the system. device can be set to the primary
+// network device of the system in which case the first address with global
+// scope will be regarded as the system's node address.
+func InitDefaultPrefix(device string) {
+ localNode.Update(func(n *LocalNode) {
+ SetDefaultPrefix(option.Config, device, n)
+ })
+}
+
+// SetDefaultPrefix fills in the node IPs and allocation CIDRs of node with
+// defaults derived from the system (via device) and from the already-set
+// address family counterpart: a missing IPv4 alloc range is derived from the
+// IPv6 range (and vice versa).
+func SetDefaultPrefix(cfg *option.DaemonConfig, device string, node *LocalNode) {
+ if cfg.EnableIPv4 {
+ isIPv6 := false
+
+ ip, err := firstGlobalV4Addr(device, node.GetCiliumInternalIP(isIPv6), preferPublicIP)
+ if err != nil {
+ // Best effort: without a global IPv4 address there is nothing
+ // to derive, so leave the node untouched.
+ return
+ }
+
+ if node.GetNodeIP(isIPv6) == nil {
+ node.SetNodeInternalIP(ip)
+ }
+
+ ipv4range := node.IPv4AllocCIDR
+ ipv6range := node.IPv6AllocCIDR
+
+ if ipv4range == nil {
+ // If the IPv6AllocRange is not nil then the IPv4 allocation should be
+ // derived from the IPv6AllocRange.
+ // vvvv vvvv
+ // FD00:0000:0000:0000:0000:0000:0000:0000
+ if ipv6range != nil {
+ ip = net.IPv4(
+ ipv6range.IP[8],
+ ipv6range.IP[9],
+ ipv6range.IP[10],
+ ipv6range.IP[11])
+ }
+ // Only the last octet of ip feeds the autogenerated prefix.
+ v4range := fmt.Sprintf(defaults.DefaultIPv4Prefix+"/%d",
+ ip.To4()[3], defaults.DefaultIPv4PrefixLen)
+ _, ip4net, err := net.ParseCIDR(v4range)
+ if err != nil {
+ log.WithError(err).WithField(logfields.V4Prefix, v4range).Panic("BUG: Invalid default IPv4 prefix")
+ }
+
+ node.IPv4AllocCIDR = cidr.NewCIDR(ip4net)
+ log.WithField(logfields.V4Prefix, node.IPv4AllocCIDR).Info("Using autogenerated IPv4 allocation range")
+ }
+ }
+
+ if cfg.EnableIPv6 {
+ isIPv6 := true
+ ipv4range := node.IPv4AllocCIDR
+ ipv6range := node.IPv6AllocCIDR
+
+ if node.GetNodeIP(isIPv6) == nil {
+ // Find a IPv6 node address first
+ addr, _ := firstGlobalV6Addr(device, node.GetCiliumInternalIP(isIPv6), preferPublicIP)
+ if addr == nil {
+ // Fall back to the fixed unique-local address.
+ addr = makeIPv6HostIP()
+ }
+ node.SetNodeInternalIP(addr)
+ }
+
+ if ipv6range == nil && ipv4range != nil {
+ // The IPv6 allocation should be derived from the IPv4 allocation.
+ ip := ipv4range.IP
+ v6range := fmt.Sprintf("%s%02x%02x:%02x%02x:0:0/%d",
+ cfg.IPv6ClusterAllocCIDRBase, ip[0], ip[1], ip[2], ip[3], 96)
+
+ _, ip6net, err := net.ParseCIDR(v6range)
+ if err != nil {
+ log.WithError(err).WithField(logfields.V6Prefix, v6range).Panic("BUG: Invalid default IPv6 prefix")
+ }
+
+ node.IPv6AllocCIDR = cidr.NewCIDR(ip6net)
+ log.WithField(logfields.V6Prefix, node.IPv6AllocCIDR).Info("Using autogenerated IPv6 allocation range")
+ }
+ }
+}
+
+// InitNodePortAddrs initializes NodePort IPv{4,6} addrs for the given devices.
+// If inheritIPAddrFromDevice is non-empty, then the IP addr for the devices
+// will be derived from it.
+func InitNodePortAddrs(devices []string, inheritIPAddrFromDevice string) error {
+ addrsMu.Lock()
+ defer addrsMu.Unlock()
+
+ var inheritedIP net.IP
+ var err error
+
+ if option.Config.EnableIPv4 {
+ if inheritIPAddrFromDevice != "" {
+ inheritedIP, err = firstGlobalV4Addr(inheritIPAddrFromDevice, GetK8sNodeIP(), !preferPublicIP)
+ if err != nil {
+ // NOTE(review): the underlying err is dropped; wrapping with %w
+ // would preserve the cause for callers and logs.
+ return fmt.Errorf("failed to determine IPv4 of %s for NodePort", inheritIPAddrFromDevice)
+ }
+ }
+ addrs.ipv4NodePortAddrs = make(map[string]net.IP, len(devices))
+ for _, device := range devices {
+ if inheritIPAddrFromDevice != "" {
+ // All devices share the single inherited address.
+ addrs.ipv4NodePortAddrs[device] = inheritedIP
+ } else {
+ ip, err := firstGlobalV4Addr(device, GetK8sNodeIP(), !preferPublicIP)
+ if err != nil {
+ return fmt.Errorf("failed to determine IPv4 of %s for NodePort", device)
+ }
+ addrs.ipv4NodePortAddrs[device] = ip
+ }
+ }
+ }
+
+ if option.Config.EnableIPv6 {
+ if inheritIPAddrFromDevice != "" {
+ inheritedIP, err = firstGlobalV6Addr(inheritIPAddrFromDevice, GetK8sNodeIP(), !preferPublicIP)
+ if err != nil {
+ // NOTE(review): error strings here are capitalized ("Failed")
+ // while the IPv4 branch uses lowercase; Go convention is
+ // lowercase, unpunctuated error strings.
+ return fmt.Errorf("Failed to determine IPv6 of %s for NodePort", inheritIPAddrFromDevice)
+ }
+ }
+ addrs.ipv6NodePortAddrs = make(map[string]net.IP, len(devices))
+ for _, device := range devices {
+ if inheritIPAddrFromDevice != "" {
+ addrs.ipv6NodePortAddrs[device] = inheritedIP
+ } else {
+ ip, err := firstGlobalV6Addr(device, GetK8sNodeIP(), !preferPublicIP)
+ if err != nil {
+ return fmt.Errorf("Failed to determine IPv6 of %s for NodePort", device)
+ }
+ addrs.ipv6NodePortAddrs[device] = ip
+ }
+ }
+ }
+
+ return nil
+}
+
+// InitBPFMasqueradeAddrs initializes BPF masquerade addrs for the given devices.
+func InitBPFMasqueradeAddrs(devices []string) error {
+ addrsMu.Lock()
+ defer addrsMu.Unlock()
+
+ masqIPFromDevice := option.Config.DeriveMasqIPAddrFromDevice
+
+ if option.Config.EnableIPv4 {
+ addrs.ipv4MasqAddrs = make(map[string]net.IP, len(devices))
+ err := initMasqueradeV4Addrs(addrs.ipv4MasqAddrs, masqIPFromDevice, devices, logfields.IPv4)
+ if err != nil {
+ return err
+ }
+ }
+ if option.Config.EnableIPv6 {
+ addrs.ipv6MasqAddrs = make(map[string]net.IP, len(devices))
+ return initMasqueradeV6Addrs(addrs.ipv6MasqAddrs, masqIPFromDevice, devices, logfields.IPv6)
+ }
+
+ return nil
+}
+
+// clone returns a defensive copy of ip, preserving nil.
+func clone(ip net.IP) net.IP {
+ if ip == nil {
+ return nil
+ }
+ dup := make(net.IP, len(ip))
+ copy(dup, ip)
+ return dup
+}
+
+// GetIPv4Loopback returns the loopback IPv4 address of this node.
+func GetIPv4Loopback() net.IP {
+ addrsMu.RLock()
+ defer addrsMu.RUnlock()
+ return clone(addrs.ipv4Loopback)
+}
+
+// SetIPv4Loopback sets the loopback IPv4 address of this node.
+func SetIPv4Loopback(ip net.IP) {
+ addrsMu.Lock()
+ addrs.ipv4Loopback = clone(ip)
+ addrsMu.Unlock()
+}
+
+// GetIPv4AllocRange returns the IPv4 allocation prefix of this node
+func GetIPv4AllocRange() *cidr.CIDR {
+ return getLocalNode().IPv4AllocCIDR.DeepCopy()
+}
+
+// GetIPv6AllocRange returns the IPv6 allocation prefix of this node
+func GetIPv6AllocRange() *cidr.CIDR {
+ return getLocalNode().IPv6AllocCIDR.DeepCopy()
+}
+
+// GetIPv4 returns one of the IPv4 node address available with the following
+// priority:
+// - NodeInternalIP
+// - NodeExternalIP
+// - other IP address type.
+// It must be reachable on the network.
+func GetIPv4() net.IP {
+ n := getLocalNode()
+ return clone(n.GetNodeIP(false))
+}
+
+// GetInternalIPv4 returns node internal ipv4 address else return nil.
+func GetInternalIPv4() net.IP {
+ n := getLocalNode()
+ return clone(n.GetNodeInternalIPv4())
+}
+
+// GetInternalIPv6 returns node internal ipv6 address else return nil.
+func GetInternalIPv6() net.IP {
+ n := getLocalNode()
+ return clone(n.GetNodeInternalIPv6())
+}
+
+// GetCiliumEndpointNodeIP is the node IP that will be referenced by CiliumEndpoints with endpoints
+// running on this node. Prefers IPv4 when the IPv4 stack is enabled.
+func GetCiliumEndpointNodeIP() string {
+ if option.Config.EnableIPv4 {
+ return GetIPv4().String()
+ }
+ return GetIPv6().String()
+}
+
+// SetInternalIPv4Router sets the cilium internal IPv4 node address, it is allocated from the node prefix.
+// This must not be conflated with k8s internal IP as this IP address is only relevant within the
+// Cilium-managed network (this means within the node for direct routing mode and on the overlay
+// for tunnel mode).
+func SetInternalIPv4Router(ip net.IP) {
+ localNode.Update(func(n *LocalNode) {
+ n.SetCiliumInternalIP(ip)
+ })
+}
+
+// GetInternalIPv4Router returns the cilium internal IPv4 node address. This must not be conflated with
+// k8s internal IP as this IP address is only relevant within the Cilium-managed network (this means
+// within the node for direct routing mode and on the overlay for tunnel mode).
+func GetInternalIPv4Router() net.IP {
+ n := getLocalNode()
+ // NOTE(review): unlike the other getters, the result is not clone()d;
+ // callers share the underlying slice — confirm this is intentional.
+ return n.GetCiliumInternalIP(false)
+}
+
+// GetK8sExternalIPv4 returns the external IPv4 node address. It must be a public IP that is routable
+// on the network as well as the internet. It can return nil if no External IPv4 address is assigned.
+func GetK8sExternalIPv4() net.IP {
+ n := getLocalNode()
+ return n.GetExternalIP(false)
+}
+
+// GetRouterInfo returns additional information for the router, the cilium_host interface.
+func GetRouterInfo() RouterInfo {
+ addrsMu.RLock()
+ defer addrsMu.RUnlock()
+ return addrs.routerInfo
+}
+
+// SetRouterInfo sets additional information for the router, the cilium_host interface.
+func SetRouterInfo(info RouterInfo) {
+ addrsMu.Lock()
+ addrs.routerInfo = info
+ addrsMu.Unlock()
+}
+
+// GetHostMasqueradeIPv4 returns the IPv4 address to be used for masquerading
+// any traffic that is being forwarded from the host into the Cilium cluster.
+func GetHostMasqueradeIPv4() net.IP {
+ return GetInternalIPv4Router()
+}
+
+// SetIPv4AllocRange sets the IPv4 address pool to use when allocating
+// addresses for local endpoints
+func SetIPv4AllocRange(net *cidr.CIDR) {
+ localNode.Update(func(n *LocalNode) {
+ n.IPv4AllocCIDR = net
+ })
+}
+
+// GetNodePortIPv4Addrs returns the node-port IPv4 address for NAT
+func GetNodePortIPv4Addrs() []net.IP {
+ addrsMu.RLock()
+ defer addrsMu.RUnlock()
+ addrs4 := make([]net.IP, 0, len(addrs.ipv4NodePortAddrs))
+ for _, addr := range addrs.ipv4NodePortAddrs {
+ addrs4 = append(addrs4, clone(addr))
+ }
+ return addrs4
+}
+
+// GetNodePortIPv4AddrsWithDevices returns the map iface => NodePort IPv4.
+func GetNodePortIPv4AddrsWithDevices() map[string]net.IP {
+ addrsMu.RLock()
+ defer addrsMu.RUnlock()
+ return copyStringToNetIPMap(addrs.ipv4NodePortAddrs)
+}
+
+// GetNodePortIPv6Addrs returns the node-port IPv6 address for NAT
+func GetNodePortIPv6Addrs() []net.IP {
+ addrsMu.RLock()
+ defer addrsMu.RUnlock()
+ addrs6 := make([]net.IP, 0, len(addrs.ipv6NodePortAddrs))
+ for _, addr := range addrs.ipv6NodePortAddrs {
+ addrs6 = append(addrs6, clone(addr))
+ }
+ return addrs6
+}
+
+// GetNodePortIPv6AddrsWithDevices returns the map iface => NodePort IPv6.
+func GetNodePortIPv6AddrsWithDevices() map[string]net.IP {
+ addrsMu.RLock()
+ defer addrsMu.RUnlock()
+ return copyStringToNetIPMap(addrs.ipv6NodePortAddrs)
+}
+
+// GetMasqIPv4AddrsWithDevices returns the map iface => BPF masquerade IPv4.
+func GetMasqIPv4AddrsWithDevices() map[string]net.IP {
+ addrsMu.RLock()
+ defer addrsMu.RUnlock()
+ return copyStringToNetIPMap(addrs.ipv4MasqAddrs)
+}
+
+// GetMasqIPv6AddrsWithDevices returns the map iface => BPF masquerade IPv6.
+func GetMasqIPv6AddrsWithDevices() map[string]net.IP {
+ addrsMu.RLock()
+ defer addrsMu.RUnlock()
+ return copyStringToNetIPMap(addrs.ipv6MasqAddrs)
+}
+
+// SetIPv6NodeRange sets the IPv6 address pool to be used on this node
+func SetIPv6NodeRange(net *cidr.CIDR) {
+ localNode.Update(func(n *LocalNode) {
+ n.IPv6AllocCIDR = net
+ })
+}
+
+// AutoComplete completes the parts of addressing that can be auto derived.
+// It returns an error if, after derivation, a required allocation CIDR for
+// an enabled address family is still missing.
+func AutoComplete() error {
+ if option.Config.EnableHostIPRestore {
+ // At this point, only attempt to restore the `cilium_host` IPs from
+ // the filesystem because we haven't fully synced with K8s yet.
+ restoreCiliumHostIPsFromFS()
+ }
+
+ InitDefaultPrefix(option.Config.DirectRoutingDevice)
+
+ if option.Config.EnableIPv6 && GetIPv6AllocRange() == nil {
+ return fmt.Errorf("IPv6 allocation CIDR is not configured. Please specify --%s", option.IPv6Range)
+ }
+
+ if option.Config.EnableIPv4 && GetIPv4AllocRange() == nil {
+ return fmt.Errorf("IPv4 allocation CIDR is not configured. Please specify --%s", option.IPv4Range)
+ }
+
+ return nil
+}
+
+// RestoreHostIPs restores the router IPs (`cilium_host`) from a previous
+// Cilium run. Router IPs from the filesystem are preferred over the IPs found
+// in the Kubernetes resource (Node or CiliumNode), because we consider the
+// filesystem to be the most up-to-date source of truth. The chosen router IP
+// is then checked whether it is contained inside node CIDR (pod CIDR) range.
+// If not, then the router IP is discarded and not restored.
+//
+// ipv6 selects which address family (and therefore which setter) is used.
+// fromK8s/fromFS are the candidate IPs from the Kubernetes resource and the
+// filesystem respectively; either may be nil.
+//
+// The restored IP is returned (nil if nothing was restored).
+func RestoreHostIPs(ipv6 bool, fromK8s, fromFS net.IP, cidrs []*cidr.CIDR) net.IP {
+ if !option.Config.EnableHostIPRestore {
+ return nil
+ }
+
+ var (
+ setter func(net.IP)
+ )
+ if ipv6 {
+ setter = SetIPv6Router
+ } else {
+ setter = SetInternalIPv4Router
+ }
+
+ ip, err := chooseHostIPsToRestore(ipv6, fromK8s, fromFS, cidrs)
+ switch {
+ case err != nil && errors.Is(err, errDoesNotBelong):
+ log.WithFields(logrus.Fields{
+ logfields.CIDRS: cidrs,
+ }).Infof(
+ "The router IP (%s) considered for restoration does not belong in the Pod CIDR of the node. Discarding old router IP.",
+ ip,
+ )
+ // Indicate that this IP will not be restored by setting to nil after
+ // we've used it to log above.
+ ip = nil
+ setter(nil)
+ case err != nil && errors.Is(err, errMismatch):
+ log.Warnf(
+ mismatchRouterIPsMsg,
+ fromK8s, fromFS, option.LocalRouterIPv4, option.LocalRouterIPv6,
+ )
+ fallthrough // Above is just a warning; we still want to set the router IP regardless.
+ case err == nil:
+ setter(ip)
+ }
+
+ return ip
+}
+
+// chooseHostIPsToRestore picks the router IP to restore from the two
+// candidates. The FS IP wins over the K8s IP; errMismatch is returned when
+// both are set but differ, errDoesNotBelong when the chosen IP is not inside
+// any of the given CIDRs.
+func chooseHostIPsToRestore(ipv6 bool, fromK8s, fromFS net.IP, cidrs []*cidr.CIDR) (ip net.IP, err error) {
+ switch {
+ // If both IPs are available, then check both for validity. We prefer the
+ // local IP from the FS over the K8s IP.
+ case fromK8s != nil && fromFS != nil:
+ if fromK8s.Equal(fromFS) {
+ ip = fromK8s
+ } else {
+ ip = fromFS
+ err = errMismatch
+
+ // Check if we need to fallback to using the fromK8s IP, in the
+ // case that the IP from the FS is not within the CIDR. If we
+ // fallback, then we also need to check the fromK8s IP is also
+ // within the CIDR.
+ for _, cidr := range cidrs {
+ if cidr != nil && cidr.Contains(ip) {
+ return
+ } else if cidr != nil && cidr.Contains(fromK8s) {
+ ip = fromK8s
+ return
+ }
+ }
+ }
+ case fromK8s == nil && fromFS != nil:
+ ip = fromFS
+ case fromK8s != nil && fromFS == nil:
+ ip = fromK8s
+ case fromK8s == nil && fromFS == nil:
+ // We do nothing in this case because there are no router IPs to
+ // restore.
+ return
+ }
+
+ // Validate the single chosen candidate against the pod CIDRs.
+ for _, cidr := range cidrs {
+ if cidr != nil && cidr.Contains(ip) {
+ return
+ }
+ }
+
+ err = errDoesNotBelong
+ return
+}
+
+// restoreCiliumHostIPsFromFS restores the router IPs (`cilium_host`) from a
+// previous Cilium run. The IPs are restored from the filesystem. This is part
+// 1/2 of the restoration.
+func restoreCiliumHostIPsFromFS() {
+ // Read the previous cilium_host IPs from node_config.h for backward
+ // compatibility.
+ router4, router6 := getCiliumHostIPs()
+ if option.Config.EnableIPv4 {
+ SetInternalIPv4Router(router4)
+ }
+ if option.Config.EnableIPv6 {
+ SetIPv6Router(router6)
+ }
+}
+
+var (
+ // errMismatch signals that the FS and K8s router IPs disagree.
+ errMismatch = errors.New("mismatched IPs")
+ // errDoesNotBelong signals that the chosen router IP is outside the pod CIDR.
+ errDoesNotBelong = errors.New("IP does not belong to CIDR")
+)
+
+// mismatchRouterIPsMsg is the log format used when FS and K8s disagree.
+const mismatchRouterIPsMsg = "Mismatch of router IPs found during restoration. The Kubernetes resource contained %s, while the filesystem contained %s. Using the router IP from the filesystem. To change the router IP, specify --%s and/or --%s."
+
+// ValidatePostInit validates the entire addressing setup and completes it as
+// required
+func ValidatePostInit() error {
+ // An external IPv4 address is also required in tunneling mode, even if
+ // the IPv4 stack itself is disabled.
+ if option.Config.EnableIPv4 || option.Config.TunnelingEnabled() {
+ if GetIPv4() == nil {
+ return fmt.Errorf("external IPv4 node address could not be derived, please configure via --ipv4-node")
+ }
+ }
+
+ if option.Config.EnableIPv4 && GetInternalIPv4Router() == nil {
+ return fmt.Errorf("BUG: Internal IPv4 node address was not configured")
+ }
+
+ return nil
+}
+
+// GetIPv6 returns the IPv6 address of the node
+func GetIPv6() net.IP {
+ n := getLocalNode()
+ return clone(n.GetNodeIP(true))
+}
+
+// GetHostMasqueradeIPv6 returns the IPv6 address to be used for masquerading
+// any traffic that is being forwarded from the host into the Cilium cluster.
+func GetHostMasqueradeIPv6() net.IP {
+ return GetIPv6Router()
+}
+
+// GetIPv6Router returns the IPv6 address of the router, e.g. address
+// of cilium_host device.
+func GetIPv6Router() net.IP {
+ n := getLocalNode()
+ return clone(n.GetCiliumInternalIP(true))
+}
+
+// SetIPv6Router sets the IPv6 address of the router address, e.g. address
+// of cilium_host device.
+func SetIPv6Router(ip net.IP) {
+ localNode.Update(func(n *LocalNode) {
+ n.SetCiliumInternalIP(ip)
+ })
+}
+
+// GetK8sExternalIPv6 returns the external IPv6 node address. It can return
+// nil if no external IPv6 address is assigned.
+func GetK8sExternalIPv6() net.IP {
+ n := getLocalNode()
+ // Bug fix: the argument is the isIPv6 selector (cf. GetNodeIP(true) in
+ // GetIPv6); passing false here returned the external IPv4 address.
+ return clone(n.GetExternalIP(true))
+}
+
+// GetNodeAddressing returns the NodeAddressing model for the local IPs,
+// populating only the enabled address families.
+func GetNodeAddressing() *models.NodeAddressing {
+ a := &models.NodeAddressing{}
+
+ if option.Config.EnableIPv6 {
+ a.IPV6 = &models.NodeAddressingElement{
+ Enabled: option.Config.EnableIPv6,
+ IP: GetIPv6Router().String(),
+ AllocRange: GetIPv6AllocRange().String(),
+ }
+ }
+
+ if option.Config.EnableIPv4 {
+ a.IPV4 = &models.NodeAddressingElement{
+ Enabled: option.Config.EnableIPv4,
+ IP: GetInternalIPv4Router().String(),
+ AllocRange: GetIPv4AllocRange().String(),
+ }
+ }
+
+ return a
+}
+
+// getCiliumHostIPsFromFile parses the given node_config.h and extracts the
+// previously written cilium_host router IPs. It understands both the current
+// defines (defaults.RestoreV4Addr/RestoreV6Addr) and the legacy IPV4_GATEWAY
+// and ROUTER_IP defines. Either return value may be nil if not found.
+func getCiliumHostIPsFromFile(nodeConfig string) (ipv4GW, ipv6Router net.IP) {
+ // ipLen is the length of the IP address stored in the node_config.h
+ // it has the same length for both IPv4 and IPv6.
+ const ipLen = net.IPv6len
+
+ var hasIPv4, hasIPv6 bool
+ f, err := os.Open(nodeConfig)
+ switch {
+ case err != nil:
+ // File missing or unreadable: return nil, nil (nothing restored).
+ default:
+ defer f.Close()
+ scanner := bufio.NewScanner(f)
+ // NOTE(review): scanner.Err() is not checked after the loop; a read
+ // error would be silently treated as end-of-file.
+ for scanner.Scan() {
+ txt := scanner.Text()
+ switch {
+ case !hasIPv6 && strings.Contains(txt, defaults.RestoreV6Addr):
+ defineLine := strings.Split(txt, defaults.RestoreV6Addr)
+ if len(defineLine) != 2 {
+ continue
+ }
+ ipv6 := common.C2GoArray(defineLine[1])
+ if len(ipv6) != ipLen {
+ continue
+ }
+ ipv6Router = net.IP(ipv6)
+ hasIPv6 = true
+ case !hasIPv4 && strings.Contains(txt, defaults.RestoreV4Addr):
+ defineLine := strings.Split(txt, defaults.RestoreV4Addr)
+ if len(defineLine) != 2 {
+ continue
+ }
+ ipv4 := common.C2GoArray(defineLine[1])
+ if len(ipv4) != ipLen {
+ continue
+ }
+ ipv4GW = net.IP(ipv4)
+ hasIPv4 = true
+
+ // Legacy cases based on the header defines:
+ case !hasIPv4 && strings.Contains(txt, "IPV4_GATEWAY"):
+ // #define IPV4_GATEWAY 0xee1c000a
+ defineLine := strings.Split(txt, " ")
+ if len(defineLine) != 3 {
+ continue
+ }
+ ipv4GWHex := strings.TrimPrefix(defineLine[2], "0x")
+ ipv4GWUint64, err := strconv.ParseUint(ipv4GWHex, 16, 32)
+ if err != nil {
+ continue
+ }
+ if ipv4GWUint64 != 0 {
+ // The legacy define is stored in native byte order.
+ bs := make([]byte, net.IPv4len)
+ byteorder.Native.PutUint32(bs, uint32(ipv4GWUint64))
+ ipv4GW = net.IPv4(bs[0], bs[1], bs[2], bs[3])
+ hasIPv4 = true
+ }
+ case !hasIPv6 && strings.Contains(txt, " ROUTER_IP "):
+ // #define ROUTER_IP 0xf0, 0xd, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xa, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8a, 0xd6
+ defineLine := strings.Split(txt, " ROUTER_IP ")
+ if len(defineLine) != 2 {
+ continue
+ }
+ ipv6 := common.C2GoArray(defineLine[1])
+ if len(ipv6) != net.IPv6len {
+ continue
+ }
+ ipv6Router = net.IP(ipv6)
+ hasIPv6 = true
+ }
+ }
+ }
+ return ipv4GW, ipv6Router
+}
+
+// getCiliumHostIPs returns the Cilium IPv4 gateway and router IPv6 address from
+// the node_config.h file if is present; or by deriving it from
+// defaults.HostDevice interface, on which only the IPv4 is possible to derive.
+func getCiliumHostIPs() (ipv4GW, ipv6Router net.IP) {
+ nodeConfig := option.Config.GetNodeConfigPath()
+ ipv4GW, ipv6Router = getCiliumHostIPsFromFile(nodeConfig)
+ if ipv4GW != nil || ipv6Router != nil {
+ log.WithFields(logrus.Fields{
+ "ipv4": ipv4GW,
+ "ipv6": ipv6Router,
+ "file": nodeConfig,
+ }).Info("Restored router address from node_config")
+ return ipv4GW, ipv6Router
+ }
+ // Fall back to inspecting the host device directly.
+ return getCiliumHostIPsFromNetDev(defaults.HostDevice)
+}
+
+// SetIPsecKeyIdentity sets the IPsec key identity an opaque value used to
+// identity encryption keys used on the node.
+func SetIPsecKeyIdentity(id uint8) {
+ localNode.Update(func(n *LocalNode) {
+ n.EncryptionKey = id
+ })
+}
+
+// GetIPsecKeyIdentity returns the IPsec key identity of the node
+func GetIPsecKeyIdentity() uint8 {
+ return getLocalNode().EncryptionKey
+}
+
+// GetK8sNodeIP returns the k8s Node IP address.
+func GetK8sNodeIP() net.IP {
+ n := getLocalNode()
+ return n.GetK8sNodeIP()
+}
+
+// GetWireguardPubKey returns the WireGuard public key of the local node.
+func GetWireguardPubKey() string {
+ return getLocalNode().WireguardPubKey
+}
+
+// GetOptOutNodeEncryption returns true if the local node opted out of
+// node-to-node encryption.
+func GetOptOutNodeEncryption() bool {
+ return getLocalNode().OptOutNodeEncryption
+}
+
+// SetOptOutNodeEncryption sets whether the local node opts out of
+// node-to-node encryption.
+func SetOptOutNodeEncryption(b bool) {
+ localNode.Update(func(node *LocalNode) {
+ node.OptOutNodeEncryption = b
+ })
+}
+
+// SetEndpointHealthIPv4 sets the IPv4 cilium-health endpoint address.
+func SetEndpointHealthIPv4(ip net.IP) {
+ localNode.Update(func(n *LocalNode) {
+ n.IPv4HealthIP = ip
+ })
+}
+
+// GetEndpointHealthIPv4 returns the IPv4 cilium-health endpoint address.
+func GetEndpointHealthIPv4() net.IP {
+ return getLocalNode().IPv4HealthIP
+}
+
+// SetEndpointHealthIPv6 sets the IPv6 cilium-health endpoint address.
+func SetEndpointHealthIPv6(ip net.IP) {
+ localNode.Update(func(n *LocalNode) {
+ n.IPv6HealthIP = ip
+ })
+}
+
+// GetEndpointHealthIPv6 returns the IPv6 cilium-health endpoint address.
+func GetEndpointHealthIPv6() net.IP {
+ return getLocalNode().IPv6HealthIP
+}
+
+// SetIngressIPv4 sets the local IPv4 source address for Cilium Ingress.
+func SetIngressIPv4(ip net.IP) {
+ localNode.Update(func(n *LocalNode) {
+ n.IPv4IngressIP = ip
+ })
+}
+
+// GetIngressIPv4 returns the local IPv4 source address for Cilium Ingress.
+func GetIngressIPv4() net.IP {
+ return getLocalNode().IPv4IngressIP
+}
+
+// SetIngressIPv6 sets the local IPv6 source address for Cilium Ingress.
+func SetIngressIPv6(ip net.IP) {
+ localNode.Update(func(n *LocalNode) {
+ n.IPv6IngressIP = ip
+ })
+}
+
+// GetIngressIPv6 returns the local IPv6 source address for Cilium Ingress.
+func GetIngressIPv6() net.IP {
+ return getLocalNode().IPv6IngressIP
+}
+
+// GetEncryptKeyIndex returns the encryption key value for the local node.
+// With IPSec encryption, this is equivalent to GetIPsecKeyIdentity().
+// With WireGuard encryption, this function returns a non-zero static value
+// if the local node has WireGuard enabled.
+func GetEncryptKeyIndex() uint8 {
+ switch {
+ case option.Config.EnableIPSec:
+ return GetIPsecKeyIdentity()
+ case option.Config.EnableWireguard:
+ if len(GetWireguardPubKey()) > 0 {
+ return wgTypes.StaticEncryptKey
+ }
+ }
+ // Encryption disabled (or WireGuard without a public key).
+ return 0
+}
+
+// copyStringToNetIPMap returns a deep copy of in, duplicating each IP's
+// backing slice so callers cannot mutate the originals.
+func copyStringToNetIPMap(in map[string]net.IP) map[string]net.IP {
+ out := make(map[string]net.IP, len(in))
+ for iface, ip := range in {
+ dup := make(net.IP, len(ip))
+ copy(dup, ip)
+ out[iface] = dup
+ }
+ return out
+}
+
+// WithTestLocalNodeStore sets the 'localNode' to a temporary instance and
+// runs the given test. Afterwards the 'localNode' is restored to nil.
+// This is a temporary workaround for tests until the LocalNodeStoreCell can be
+// used.
+func WithTestLocalNodeStore(runTest func()) {
+ SetTestLocalNodeStore()
+ defer UnsetTestLocalNodeStore()
+ runTest()
+}
+
+// SetTestLocalNodeStore installs a fresh test LocalNodeStore. It panics if
+// one is already set, to catch nested/leaked test setups.
+func SetTestLocalNodeStore() {
+ if localNode != nil {
+ panic("localNode already set")
+ }
+
+ // Set the localNode global variable temporarily so that the legacy getters
+ // and setters can access it.
+ localNode = NewTestLocalNodeStore(LocalNode{})
+}
+
+// UnsetTestLocalNodeStore clears the test LocalNodeStore installed by
+// SetTestLocalNodeStore.
+func UnsetTestLocalNodeStore() {
+ localNode = nil
+}
+
+// UpdateLocalNodeInTest provides access to modifying the local node
+// information from tests that are not yet using hive and the LocalNodeStoreCell.
+func UpdateLocalNodeInTest(mod func(n *LocalNode)) {
+ if localNode == nil {
+ panic("localNode not set, use node.LocalNodeStoreCell or WithTestLocalNodeStore()?")
+ }
+ localNode.Update(mod)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/node/address_linux.go b/vendor/github.com/cilium/cilium/pkg/node/address_linux.go
new file mode 100644
index 000000000..b0f66d23e
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/node/address_linux.go
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+//go:build !darwin
+
+package node
+
+import (
+ "fmt"
+ "net"
+ "sort"
+
+ "github.com/sirupsen/logrus"
+ "github.com/vishvananda/netlink"
+ "golang.org/x/sys/unix"
+
+ "github.com/cilium/cilium/pkg/ip"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+)
+
+// firstGlobalAddr returns the first global address of the given family on
+// intf (or on any interface if intf is empty/unknown), preferring preferredIP
+// when present and, with preferPublic, public over private addresses. See
+// firstGlobalV4Addr for the full selection rules. The retryScope and
+// retryInterface labels implement a widening fallback: first relax the
+// address scope on the chosen interface, then retry across all interfaces.
+func firstGlobalAddr(intf string, preferredIP net.IP, family int, preferPublic bool) (net.IP, error) {
+ var link netlink.Link
+ var ipLen int
+ var err error
+
+ ipsToExclude := GetExcludedIPs()
+ linkScopeMax := unix.RT_SCOPE_UNIVERSE
+ if family == netlink.FAMILY_V4 {
+ ipLen = 4
+ } else {
+ ipLen = 16
+ }
+
+ if intf != "" && intf != "undefined" {
+ link, err = netlink.LinkByName(intf)
+ if err != nil {
+ // Unknown device: fall back to scanning all interfaces.
+ link = nil
+ } else {
+ // Exclusions only apply when scanning all interfaces.
+ ipsToExclude = []net.IP{}
+ }
+ }
+
+retryInterface:
+ addr, err := netlink.AddrList(link, family)
+ if err != nil {
+ return nil, err
+ }
+
+retryScope:
+ ipsPublic := []netlink.Addr{}
+ ipsPrivate := []netlink.Addr{}
+ hasPreferred := false
+
+ for _, a := range addr {
+ if a.Scope > linkScopeMax {
+ continue
+ }
+ if ip.ListContainsIP(ipsToExclude, a.IP) {
+ continue
+ }
+ if len(a.IP) < ipLen {
+ continue
+ }
+ isPreferredIP := a.IP.Equal(preferredIP)
+ if a.Flags&unix.IFA_F_SECONDARY > 0 && !isPreferredIP {
+ // Skip secondary addresses if they're not the preferredIP
+ continue
+ }
+
+ if ip.IsPublicAddr(a.IP) {
+ ipsPublic = append(ipsPublic, a)
+ } else {
+ ipsPrivate = append(ipsPrivate, a)
+ }
+ // If the IP is the same as the preferredIP, that
+ // means that maybe it is restored from node_config.h,
+ // so if it is present we prefer this one, even if it
+ // is a secondary address.
+ if isPreferredIP {
+ hasPreferred = true
+ }
+ }
+
+ if hasPreferred && !preferPublic {
+ return preferredIP, nil
+ }
+
+ if len(ipsPublic) != 0 {
+ if hasPreferred && ip.IsPublicAddr(preferredIP) {
+ return preferredIP, nil
+ }
+
+ // Just make sure that we always return the same one and not a
+ // random one. More info in the issue GH-7637.
+ sort.SliceStable(ipsPublic, func(i, j int) bool {
+ return ipsPublic[i].LinkIndex < ipsPublic[j].LinkIndex
+ })
+
+ return ipsPublic[0].IP, nil
+ }
+
+ if len(ipsPrivate) != 0 {
+ if hasPreferred && !ip.IsPublicAddr(preferredIP) {
+ return preferredIP, nil
+ }
+
+ // Same stable order, see above ipsPublic.
+ sort.SliceStable(ipsPrivate, func(i, j int) bool {
+ return ipsPrivate[i].LinkIndex < ipsPrivate[j].LinkIndex
+ })
+
+ return ipsPrivate[0].IP, nil
+ }
+
+ // First, if a device is specified, fall back to anything wider
+ // than link (site, custom, ...) before trying all devices.
+ if linkScopeMax != unix.RT_SCOPE_SITE {
+ linkScopeMax = unix.RT_SCOPE_SITE
+ goto retryScope
+ }
+
+ // Fall back with retry for all interfaces with full scope again
+ // (which then goes back to lower scope again for all interfaces
+ // before we give up completely).
+ if link != nil {
+ linkScopeMax = unix.RT_SCOPE_UNIVERSE
+ link = nil
+ goto retryInterface
+ }
+
+ return nil, fmt.Errorf("No address found")
+}
+
+// firstGlobalV4Addr returns the first IPv4 global IP of an interface,
+// where the IPs are sorted in creation order (oldest to newest).
+//
+// All secondary IPs, except the preferredIP, are filtered out.
+//
+// Public IPs are preferred over private ones. When intf is defined only
+// IPs belonging to that interface are considered.
+//
+// If preferredIP is present in the IP list it is returned irrespective of
+// the sort order. However, if preferPublic is true and preferredIP is a
+// private IP, a public IP will be returned if it is assigned to the intf
+//
+// Passing intf and preferredIP will only return preferredIP if it is in
+// the IPs that belong to intf.
+//
+// In all cases, if intf is not found all interfaces are considered.
+//
+// If a intf-specific global address couldn't be found, we retry to find
+// an address with reduced scope (site, custom) on that particular device.
+//
+// If the latter fails as well, we retry on all interfaces beginning with
+// universe scope again (and then falling back to reduced scope).
+//
+// In case none of the above helped, we bail out with error.
+func firstGlobalV4Addr(intf string, preferredIP net.IP, preferPublic bool) (net.IP, error) {
+ return firstGlobalAddr(intf, preferredIP, netlink.FAMILY_V4, preferPublic)
+}
+
+// firstGlobalV6Addr returns first IPv6 global IP of an interface, see
+// firstGlobalV4Addr for more details.
+func firstGlobalV6Addr(intf string, preferredIP net.IP, preferPublic bool) (net.IP, error) {
+ return firstGlobalAddr(intf, preferredIP, netlink.FAMILY_V6, preferPublic)
+}
+
+// getCiliumHostIPsFromNetDev inspects the addresses assigned to devName and
+// returns the link-scoped IPv4 address as the gateway and the first
+// non-link-scoped IPv6 address as the router IP. Either may be nil.
+func getCiliumHostIPsFromNetDev(devName string) (ipv4GW, ipv6Router net.IP) {
+ hostDev, err := netlink.LinkByName(devName)
+ if err != nil {
+ return nil, nil
+ }
+ addrs, err := netlink.AddrList(hostDev, netlink.FAMILY_ALL)
+ if err != nil {
+ return nil, nil
+ }
+ for _, addr := range addrs {
+ if addr.IP.To4() != nil {
+ if addr.Scope == int(netlink.SCOPE_LINK) {
+ ipv4GW = addr.IP
+ }
+ } else {
+ if addr.Scope != int(netlink.SCOPE_LINK) {
+ ipv6Router = addr.IP
+ }
+ }
+ }
+
+ if ipv4GW != nil || ipv6Router != nil {
+ log.WithFields(logrus.Fields{
+ "ipv4": ipv4GW,
+ "ipv6": ipv6Router,
+ "device": devName,
+ }).Info("Restored router address from device")
+ }
+
+ return ipv4GW, ipv6Router
+}
+
+// initMasqueradeAddrs initializes BPF masquerade addresses for the given
+// devices. When masqIPFromDevice is set, that device's address is used for
+// every device in devices; otherwise each device gets its own address.
+func initMasqueradeAddrs(masqAddrs map[string]net.IP, family int, masqIPFromDevice string, devices []string, logfield string) error {
+ if ifaceName := masqIPFromDevice; ifaceName != "" {
+ ip, err := firstGlobalAddr(ifaceName, nil, family, preferPublicIP)
+ if err != nil {
+ // NOTE(review): the underlying err is dropped; %w would keep it.
+ return fmt.Errorf("Failed to determine IP of %s for BPF masq", ifaceName)
+ }
+ for _, device := range devices {
+ masqAddrs[device] = ip
+ }
+ return nil
+ }
+
+ for _, device := range devices {
+ ip, err := firstGlobalAddr(device, GetK8sNodeIP(), family, preferPublicIP)
+ if err != nil {
+ return fmt.Errorf("Failed to determine IP of %s for BPF masq", device)
+ }
+
+ masqAddrs[device] = ip
+ log.WithFields(logrus.Fields{
+ logfield: ip,
+ logfields.Device: device,
+ }).Info("Masquerading IP selected for device")
+ }
+
+ return nil
+}
+
+// initMasqueradeV4Addrs initializes BPF masquerade IPv4 addresses for the
+// given devices.
+func initMasqueradeV4Addrs(masqAddrs map[string]net.IP, masqIPFromDevice string, devices []string, logfield string) error {
+ return initMasqueradeAddrs(masqAddrs, netlink.FAMILY_V4, masqIPFromDevice, devices, logfield)
+}
+
+// initMasqueradeV6Addrs initializes BPF masquerade IPv6 addresses for the
+// given devices.
+func initMasqueradeV6Addrs(masqAddrs map[string]net.IP, masqIPFromDevice string, devices []string, logfield string) error {
+ return initMasqueradeAddrs(masqAddrs, netlink.FAMILY_V6, masqIPFromDevice, devices, logfield)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/node/address_other.go b/vendor/github.com/cilium/cilium/pkg/node/address_other.go
new file mode 100644
index 000000000..af2ba9fae
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/node/address_other.go
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+//go:build !linux
+
+package node
+
+import "net"
+
+func firstGlobalAddr(intf string, preferredIP net.IP, family int, preferPublic bool) (net.IP, error) {
+ return net.IP{}, nil
+}
+
+func firstGlobalV4Addr(intf string, preferredIP net.IP, preferPublic bool) (net.IP, error) {
+ return net.IP{}, nil
+}
+
+func firstGlobalV6Addr(intf string, preferredIP net.IP, preferPublic bool) (net.IP, error) {
+ return net.IP{}, nil
+}
+
+func initMasqueradeV4Addrs(masqAddrs map[string]net.IP, masqIPFromDevice string, devices []string, logfield string) error {
+ return nil
+}
+
+func initMasqueradeV6Addrs(masqAddrs map[string]net.IP, masqIPFromDevice string, devices []string, logfield string) error {
+ return nil
+}
+
+// getCiliumHostIPsFromNetDev returns the first IPv4 link-local
+// address
+func getCiliumHostIPsFromNetDev(devName string) (ipv4GW, ipv6Router net.IP) {
+ return net.IP{}, net.IP{}
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/node/addressing/addresstype.go b/vendor/github.com/cilium/cilium/pkg/node/addressing/addresstype.go
new file mode 100644
index 000000000..f75d58472
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/node/addressing/addresstype.go
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package addressing
+
+// AddressType represents a type of IP address for a node. They are copied
+// from k8s.io/api/core/v1/types.go to avoid pulling in a lot of Kubernetes
+// imports into this package.
+type AddressType string
+
+const (
+ NodeHostName AddressType = "Hostname"
+ NodeExternalIP AddressType = "ExternalIP"
+ NodeInternalIP AddressType = "InternalIP"
+ NodeExternalDNS AddressType = "ExternalDNS"
+ NodeInternalDNS AddressType = "InternalDNS"
+ NodeCiliumInternalIP AddressType = "CiliumInternalIP"
+)
diff --git a/vendor/github.com/cilium/cilium/pkg/node/doc.go b/vendor/github.com/cilium/cilium/pkg/node/doc.go
new file mode 100644
index 000000000..076044279
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/node/doc.go
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Package node provides functionality related to the local and remote node
+// addresses
+package node
diff --git a/vendor/github.com/cilium/cilium/pkg/node/host_endpoint.go b/vendor/github.com/cilium/cilium/pkg/node/host_endpoint.go
new file mode 100644
index 000000000..5d6fdce87
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/node/host_endpoint.go
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package node
+
+const (
+ templateHostEndpointID = uint64(0xffff)
+)
+
+var (
+ endpointID = templateHostEndpointID
+)
+
+// GetLabels returns the labels of this node.
+func GetLabels() map[string]string {
+ return getLocalNode().Labels
+}
+
+// SetLabels sets the labels of this node.
+func SetLabels(l map[string]string) {
+ localNode.Update(func(n *LocalNode) {
+ n.Labels = l
+ })
+}
+
+// SetAnnotations sets the annotations for this node.
+func SetAnnotations(a map[string]string) {
+ localNode.Update(func(n *LocalNode) {
+ n.Annotations = a
+ })
+}
+
+// SetMultiAttributes allows the caller to set multiple attributes
+// on the LocalNode by passing a function which modifies LocalNode
+// directly.
+//
+// This is useful when you need to update more than one attribute at once
+// but do not want to trigger Observers more than once.
+func SetMultiAttributes(f func(n *LocalNode)) {
+ localNode.Update(f)
+}
+
+// GetEndpointID returns the ID of the host endpoint for this node.
+func GetEndpointID() uint64 {
+ return endpointID
+}
+
+// SetEndpointID sets the ID of the host endpoint for this node.
+func SetEndpointID(id uint64) {
+ endpointID = id
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/node/ip.go b/vendor/github.com/cilium/cilium/pkg/node/ip.go
new file mode 100644
index 000000000..fe3a76e6e
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/node/ip.go
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package node
+
+import "net"
+
+var excludedIPs []net.IP
+
+// GetExcludedIPs returns a list of IPs from netdevices that Cilium
+// needs to exclude to operate
+func GetExcludedIPs() []net.IP {
+ return excludedIPs
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/node/ip_linux.go b/vendor/github.com/cilium/cilium/pkg/node/ip_linux.go
new file mode 100644
index 000000000..21a7176fc
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/node/ip_linux.go
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package node
+
+import (
+ "strings"
+
+ "github.com/vishvananda/netlink"
+)
+
+func init() {
+ initExcludedIPs()
+}
+
+func initExcludedIPs() {
+ // We exclude below bad device prefixes from address selection ...
+ prefixes := []string{
+ "docker",
+ }
+ links, err := netlink.LinkList()
+ if err != nil {
+ return
+ }
+ for _, l := range links {
+ // ... also all down devices since they won't be reachable.
+ //
+ // We need to check for both "up" and "unknown" state, as some
+ // drivers may not implement operstate handling, and just report
+ // their state as unknown even though they are operational.
+ if l.Attrs().OperState == netlink.OperUp ||
+ l.Attrs().OperState == netlink.OperUnknown {
+ skip := true
+ for _, p := range prefixes {
+ if strings.HasPrefix(l.Attrs().Name, p) {
+ skip = false
+ break
+ }
+ }
+ if skip {
+ continue
+ }
+ }
+ addr, err := netlink.AddrList(l, netlink.FAMILY_ALL)
+ if err != nil {
+ continue
+ }
+ for _, a := range addr {
+ excludedIPs = append(excludedIPs, a.IP)
+ }
+ }
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/node/local_node_store.go b/vendor/github.com/cilium/cilium/pkg/node/local_node_store.go
new file mode 100644
index 000000000..10d3e42ba
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/node/local_node_store.go
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package node
+
+import (
+ "context"
+
+ "github.com/cilium/cilium/pkg/hive"
+ "github.com/cilium/cilium/pkg/hive/cell"
+ "github.com/cilium/cilium/pkg/lock"
+ "github.com/cilium/cilium/pkg/node/types"
+ "github.com/cilium/cilium/pkg/stream"
+)
+
+type LocalNode struct {
+ types.Node
+ // OptOutNodeEncryption will make the local node opt-out of node-to-node
+ // encryption
+ OptOutNodeEncryption bool
+}
+
+// LocalNodeInitializer specifies how to build the initial local node object.
+type LocalNodeInitializer interface {
+ InitLocalNode(context.Context, *LocalNode) error
+}
+
+// LocalNodeStoreCell provides the LocalNodeStore instance.
+// The LocalNodeStore is the canonical owner of `types.Node` for the local node and
+// provides a reactive API for observing and updating it.
+var LocalNodeStoreCell = cell.Module(
+ "local-node-store",
+ "Provides LocalNodeStore for observing and updating local node info",
+
+ cell.Provide(NewLocalNodeStore),
+)
+
+// LocalNodeStoreParams are the inputs needed for constructing LocalNodeStore.
+type LocalNodeStoreParams struct {
+ cell.In
+
+ Lifecycle hive.Lifecycle
+ Init LocalNodeInitializer `optional:"true"`
+}
+
+// LocalNodeStore is the canonical owner for the local node object and provides
+// a reactive API for observing and updating the state.
+type LocalNodeStore struct {
+ // Changes to the local node are observable.
+ stream.Observable[LocalNode]
+
+ mu lock.Mutex
+ value LocalNode
+ emit func(LocalNode)
+ complete func(error)
+}
+
+func NewTestLocalNodeStore(mockNode LocalNode) *LocalNodeStore {
+ src, emit, complete := stream.Multicast[LocalNode](stream.EmitLatest)
+ emit(mockNode)
+ return &LocalNodeStore{
+ Observable: src,
+ emit: emit,
+ complete: complete,
+ value: mockNode,
+ }
+}
+
+func NewLocalNodeStore(params LocalNodeStoreParams) (*LocalNodeStore, error) {
+ src, emit, complete := stream.Multicast[LocalNode](stream.EmitLatest)
+
+ s := &LocalNodeStore{
+ Observable: src,
+ }
+
+ params.Lifecycle.Append(hive.Hook{
+ OnStart: func(ctx hive.HookContext) error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if params.Init != nil {
+ if err := params.Init.InitLocalNode(ctx, &s.value); err != nil {
+ return err
+ }
+ }
+
+ // Set the global variable still used by getters
+ // and setters in address.go. We're setting it in Start
+ // to catch uses of it before it's initialized.
+ localNode = s
+
+ s.emit = emit
+ s.complete = complete
+ emit(s.value)
+ return nil
+ },
+ OnStop: func(hive.HookContext) error {
+ s.mu.Lock()
+ s.complete(nil)
+ s.complete = nil
+ s.emit = nil
+ s.mu.Unlock()
+
+ localNode = nil
+ return nil
+ },
+ })
+
+ return s, nil
+}
+
+// Get retrieves the current local node. Use Get() only for inspecting the state,
+// e.g. in API handlers. Do not assume the value does not change over time.
+// Blocks until the store has been initialized.
+func (s *LocalNodeStore) Get(ctx context.Context) (LocalNode, error) {
+ // Subscribe to the stream of updates and take the first (latest) state.
+ return stream.First[LocalNode](ctx, s)
+}
+
+// Update modifies the local node with a mutator. The updated value
+// is passed to observers.
+func (s *LocalNodeStore) Update(update func(*LocalNode)) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ update(&s.value)
+
+ if s.emit != nil {
+ s.emit(s.value)
+ }
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/node/logfields.go b/vendor/github.com/cilium/cilium/pkg/node/logfields.go
new file mode 100644
index 000000000..5b12bc9f1
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/node/logfields.go
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package node
+
+import (
+ "github.com/cilium/cilium/pkg/logging"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+)
+
+var log = logging.DefaultLogger.WithField(logfields.LogSubsys, "node")
diff --git a/vendor/github.com/cilium/cilium/pkg/node/types/logfields.go b/vendor/github.com/cilium/cilium/pkg/node/types/logfields.go
new file mode 100644
index 000000000..f992f9136
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/node/types/logfields.go
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package types
+
+import (
+ "github.com/cilium/cilium/pkg/logging"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+)
+
+var log = logging.DefaultLogger.WithField(logfields.LogSubsys, "node")
diff --git a/vendor/github.com/cilium/cilium/pkg/node/types/node.go b/vendor/github.com/cilium/cilium/pkg/node/types/node.go
new file mode 100644
index 000000000..b82cc74fe
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/node/types/node.go
@@ -0,0 +1,666 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package types
+
+import (
+ "encoding/json"
+ "net"
+ "path"
+ "strings"
+
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "github.com/cilium/cilium/api/v1/models"
+ "github.com/cilium/cilium/pkg/annotation"
+ "github.com/cilium/cilium/pkg/cidr"
+ cmtypes "github.com/cilium/cilium/pkg/clustermesh/types"
+ "github.com/cilium/cilium/pkg/defaults"
+ ipamTypes "github.com/cilium/cilium/pkg/ipam/types"
+ ciliumv2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
+ "github.com/cilium/cilium/pkg/kvstore/store"
+ "github.com/cilium/cilium/pkg/node/addressing"
+ "github.com/cilium/cilium/pkg/option"
+ "github.com/cilium/cilium/pkg/source"
+)
+
+// Identity represents the node identity of a node.
+type Identity struct {
+ Name string
+ Cluster string
+}
+
+// String returns the string representation of the node identity.
+func (nn Identity) String() string {
+ return path.Join(nn.Cluster, nn.Name)
+}
+
+// appendAllocCDIR sets or appends the given podCIDR to the node.
+// If the IPv4/IPv6AllocCIDR is already set, we add the podCIDR as a secondary
+// alloc CIDR.
+func (n *Node) appendAllocCDIR(podCIDR *cidr.CIDR) {
+ if podCIDR.IP.To4() != nil {
+ if n.IPv4AllocCIDR == nil {
+ n.IPv4AllocCIDR = podCIDR
+ } else {
+ n.IPv4SecondaryAllocCIDRs = append(n.IPv4SecondaryAllocCIDRs, podCIDR)
+ }
+ } else {
+ if n.IPv6AllocCIDR == nil {
+ n.IPv6AllocCIDR = podCIDR
+ } else {
+ n.IPv6SecondaryAllocCIDRs = append(n.IPv6SecondaryAllocCIDRs, podCIDR)
+ }
+ }
+}
+
+// ParseCiliumNode parses a CiliumNode custom resource and returns a Node
+// instance. Invalid IP and CIDRs are silently ignored
+func ParseCiliumNode(n *ciliumv2.CiliumNode) (node Node) {
+ wireguardPubKey, _ := annotation.Get(n, annotation.WireguardPubKey, annotation.WireguardPubKeyAlias)
+ node = Node{
+ Name: n.Name,
+ EncryptionKey: uint8(n.Spec.Encryption.Key),
+ Cluster: option.Config.ClusterName,
+ ClusterID: option.Config.ClusterID,
+ Source: source.CustomResource,
+ Labels: n.ObjectMeta.Labels,
+ Annotations: n.ObjectMeta.Annotations,
+ NodeIdentity: uint32(n.Spec.NodeIdentity),
+ WireguardPubKey: wireguardPubKey,
+ }
+
+ for _, cidrString := range n.Spec.IPAM.PodCIDRs {
+ ipnet, err := cidr.ParseCIDR(cidrString)
+ if err == nil {
+ node.appendAllocCDIR(ipnet)
+ }
+ }
+
+ for _, pool := range n.Spec.IPAM.Pools.Allocated {
+ for _, podCIDR := range pool.CIDRs {
+ ipnet, err := cidr.ParseCIDR(string(podCIDR))
+ if err == nil {
+ node.appendAllocCDIR(ipnet)
+ }
+ }
+ }
+
+ node.IPv4HealthIP = net.ParseIP(n.Spec.HealthAddressing.IPv4)
+ node.IPv6HealthIP = net.ParseIP(n.Spec.HealthAddressing.IPv6)
+
+ node.IPv4IngressIP = net.ParseIP(n.Spec.IngressAddressing.IPV4)
+ node.IPv6IngressIP = net.ParseIP(n.Spec.IngressAddressing.IPV6)
+
+ for _, address := range n.Spec.Addresses {
+ if ip := net.ParseIP(address.IP); ip != nil {
+ node.IPAddresses = append(node.IPAddresses, Address{Type: address.Type, IP: ip})
+ }
+ }
+
+ return
+}
+
+// GetCiliumAnnotations returns the node annotations that should be set on the CiliumNode
+func (n *Node) GetCiliumAnnotations() map[string]string {
+ annotations := map[string]string{}
+ if n.WireguardPubKey != "" {
+ annotations[annotation.WireguardPubKey] = n.WireguardPubKey
+ }
+
+ // if we use a cilium node instead of a node, we also need the BGP Control Plane annotations in the cilium node instead of the main node
+ for k, a := range n.Annotations {
+ if strings.HasPrefix(k, annotation.BGPVRouterAnnoPrefix) {
+ annotations[k] = a
+ }
+ }
+
+ return annotations
+}
+
+// ToCiliumNode converts the node to a CiliumNode
+func (n *Node) ToCiliumNode() *ciliumv2.CiliumNode {
+ var (
+ podCIDRs []string
+ ipAddrs []ciliumv2.NodeAddress
+ healthIPv4, healthIPv6 string
+ ingressIPv4, ingressIPv6 string
+ )
+
+ if n.IPv4AllocCIDR != nil {
+ podCIDRs = append(podCIDRs, n.IPv4AllocCIDR.String())
+ }
+ if n.IPv6AllocCIDR != nil {
+ podCIDRs = append(podCIDRs, n.IPv6AllocCIDR.String())
+ }
+ for _, ipv4AllocCIDR := range n.IPv4SecondaryAllocCIDRs {
+ podCIDRs = append(podCIDRs, ipv4AllocCIDR.String())
+ }
+ for _, ipv6AllocCIDR := range n.IPv6SecondaryAllocCIDRs {
+ podCIDRs = append(podCIDRs, ipv6AllocCIDR.String())
+ }
+ if n.IPv4HealthIP != nil {
+ healthIPv4 = n.IPv4HealthIP.String()
+ }
+ if n.IPv6HealthIP != nil {
+ healthIPv6 = n.IPv6HealthIP.String()
+ }
+ if n.IPv4IngressIP != nil {
+ ingressIPv4 = n.IPv4IngressIP.String()
+ }
+ if n.IPv6IngressIP != nil {
+ ingressIPv6 = n.IPv6IngressIP.String()
+ }
+
+ for _, address := range n.IPAddresses {
+ ipAddrs = append(ipAddrs, ciliumv2.NodeAddress{
+ Type: address.Type,
+ IP: address.IP.String(),
+ })
+ }
+
+ return &ciliumv2.CiliumNode{
+ ObjectMeta: v1.ObjectMeta{
+ Name: n.Name,
+ Labels: n.Labels,
+ Annotations: n.GetCiliumAnnotations(),
+ },
+ Spec: ciliumv2.NodeSpec{
+ Addresses: ipAddrs,
+ HealthAddressing: ciliumv2.HealthAddressingSpec{
+ IPv4: healthIPv4,
+ IPv6: healthIPv6,
+ },
+ IngressAddressing: ciliumv2.AddressPair{
+ IPV4: ingressIPv4,
+ IPV6: ingressIPv6,
+ },
+ Encryption: ciliumv2.EncryptionSpec{
+ Key: int(n.EncryptionKey),
+ },
+ IPAM: ipamTypes.IPAMSpec{
+ PodCIDRs: podCIDRs,
+ },
+ NodeIdentity: uint64(n.NodeIdentity),
+ },
+ }
+}
+
+// RegisterNode overloads GetKeyName to ignore the cluster name, as cluster name may not be stable during node registration.
+//
+// +k8s:deepcopy-gen=true
+type RegisterNode struct {
+ Node
+}
+
+// GetKeyName Overloaded key name w/o cluster name
+func (n *RegisterNode) GetKeyName() string {
+ return n.Name
+}
+
+// DeepKeyCopy creates a deep copy of the LocalKey
+func (n *RegisterNode) DeepKeyCopy() store.LocalKey {
+ return n.DeepCopy()
+}
+
+// Node contains the nodes name, the list of addresses to this address
+//
+// +k8s:deepcopy-gen=true
+type Node struct {
+ // Name is the name of the node. This is typically the hostname of the node.
+ Name string
+
+ // Cluster is the name of the cluster the node is associated with
+ Cluster string
+
+ IPAddresses []Address
+
+ // IPv4AllocCIDR if set, is the IPv4 address pool out of which the node
+ // allocates IPs for local endpoints from
+ IPv4AllocCIDR *cidr.CIDR
+
+ // IPv4SecondaryAllocCIDRs contains additional IPv4 CIDRs from which this
+ // node allocates IPs for its local endpoints from
+ IPv4SecondaryAllocCIDRs []*cidr.CIDR
+
+ // IPv6AllocCIDR if set, is the IPv6 address pool out of which the node
+ // allocates IPs for local endpoints from
+ IPv6AllocCIDR *cidr.CIDR
+
+ // IPv6SecondaryAllocCIDRs contains additional IPv6 CIDRs from which this
+ // node allocates IPs for its local endpoints from
+ IPv6SecondaryAllocCIDRs []*cidr.CIDR
+
+ // IPv4HealthIP if not nil, this is the IPv4 address of the
+ // cilium-health endpoint located on the node.
+ IPv4HealthIP net.IP
+
+ // IPv6HealthIP if not nil, this is the IPv6 address of the
+ // cilium-health endpoint located on the node.
+ IPv6HealthIP net.IP
+
+ // IPv4IngressIP if not nil, this is the IPv4 address of the
+ // Ingress listener on the node.
+ IPv4IngressIP net.IP
+
+ // IPv6IngressIP if not nil, this is the IPv6 address of the
+ // Ingress listener located on the node.
+ IPv6IngressIP net.IP
+
+ // ClusterID is the unique identifier of the cluster
+ ClusterID uint32
+
+ // Source is the source where the node configuration was generated / created.
+ Source source.Source
+
+ // Key index used for transparent encryption or 0 for no encryption
+ EncryptionKey uint8
+
+ // Node labels
+ Labels map[string]string
+
+ // Node annotations
+ Annotations map[string]string
+
+ // NodeIdentity is the numeric identity allocated for the node
+ NodeIdentity uint32
+
+ // WireguardPubKey is the WireGuard public key of this node
+ WireguardPubKey string
+}
+
+// Fullname returns the node's full name including the cluster name if a
+// cluster name value other than the default value has been specified
+func (n *Node) Fullname() string {
+ if n.Cluster != defaults.ClusterName {
+ return path.Join(n.Cluster, n.Name)
+ }
+
+ return n.Name
+}
+
+// Address is a node address which contains an IP and the address type.
+//
+// +k8s:deepcopy-gen=true
+type Address struct {
+ Type addressing.AddressType
+ IP net.IP
+}
+
+// GetNodeIP returns one of the node's IP addresses available with the
+// following priority:
+// - NodeInternalIP
+// - NodeExternalIP
+// - other IP address type
+func (n *Node) GetNodeIP(ipv6 bool) net.IP {
+ var backupIP net.IP
+ for _, addr := range n.IPAddresses {
+ if (ipv6 && addr.IP.To4() != nil) ||
+ (!ipv6 && addr.IP.To4() == nil) {
+ continue
+ }
+ switch addr.Type {
+ // Ignore CiliumInternalIPs
+ case addressing.NodeCiliumInternalIP:
+ continue
+ // Always prefer a cluster internal IP
+ case addressing.NodeInternalIP:
+ return addr.IP
+ case addressing.NodeExternalIP:
+ // Fall back to external Node IP
+ // if no internal IP could be found
+ backupIP = addr.IP
+ default:
+ // As a last resort, if no internal or external
+ // IP was found, use any node address available
+ if backupIP == nil {
+ backupIP = addr.IP
+ }
+ }
+ }
+ return backupIP
+}
+
+// GetExternalIP returns ExternalIP of k8s Node. If not present, then it
+// returns nil.
+func (n *Node) GetExternalIP(ipv6 bool) net.IP {
+ for _, addr := range n.IPAddresses {
+ if (ipv6 && addr.IP.To4() != nil) || (!ipv6 && addr.IP.To4() == nil) {
+ continue
+ }
+ if addr.Type == addressing.NodeExternalIP {
+ return addr.IP
+ }
+ }
+
+ return nil
+}
+
+// GetK8sNodeIP returns the k8s Node IP (either InternalIP or ExternalIP or nil;
+// the former is preferred).
+func (n *Node) GetK8sNodeIP() net.IP {
+ var externalIP net.IP
+
+ for _, addr := range n.IPAddresses {
+ if addr.Type == addressing.NodeInternalIP {
+ return addr.IP
+ } else if addr.Type == addressing.NodeExternalIP {
+ externalIP = addr.IP
+ }
+ }
+
+ return externalIP
+}
+
+// GetNodeInternalIPv4 returns the Internal IPv4 of node or nil.
+func (n *Node) GetNodeInternalIPv4() net.IP {
+ for _, addr := range n.IPAddresses {
+ if addr.IP.To4() == nil {
+ continue
+ }
+ if addr.Type == addressing.NodeInternalIP {
+ return addr.IP
+ }
+ }
+
+ return nil
+}
+
+// GetNodeInternalIPv6 returns the Internal IPv6 of node or nil.
+func (n *Node) GetNodeInternalIPv6() net.IP {
+ for _, addr := range n.IPAddresses {
+ if addr.IP.To4() != nil {
+ continue
+ }
+ if addr.Type == addressing.NodeInternalIP {
+ return addr.IP
+ }
+ }
+
+ return nil
+}
+
+// GetCiliumInternalIP returns the CiliumInternalIP e.g. the IP associated
+// with cilium_host on the node.
+func (n *Node) GetCiliumInternalIP(ipv6 bool) net.IP {
+ for _, addr := range n.IPAddresses {
+ if (ipv6 && addr.IP.To4() != nil) ||
+ (!ipv6 && addr.IP.To4() == nil) {
+ continue
+ }
+ if addr.Type == addressing.NodeCiliumInternalIP {
+ return addr.IP
+ }
+ }
+ return nil
+}
+
+// SetCiliumInternalIP sets the CiliumInternalIP e.g. the IP associated
+// with cilium_host on the node.
+func (n *Node) SetCiliumInternalIP(newAddr net.IP) {
+ n.setAddress(addressing.NodeCiliumInternalIP, newAddr)
+}
+
+// SetNodeExternalIP sets the NodeExternalIP.
+func (n *Node) SetNodeExternalIP(newAddr net.IP) {
+ n.setAddress(addressing.NodeExternalIP, newAddr)
+}
+
+// SetNodeInternalIP sets the NodeInternalIP.
+func (n *Node) SetNodeInternalIP(newAddr net.IP) {
+ n.setAddress(addressing.NodeInternalIP, newAddr)
+}
+
+func (n *Node) RemoveAddresses(typ addressing.AddressType) {
+ newAddresses := []Address{}
+ for _, addr := range n.IPAddresses {
+ if addr.Type != typ {
+ newAddresses = append(newAddresses, addr)
+ }
+ }
+ n.IPAddresses = newAddresses
+}
+
+func (n *Node) setAddress(typ addressing.AddressType, newIP net.IP) {
+ newAddr := Address{Type: typ, IP: newIP}
+
+ if newIP == nil {
+ n.RemoveAddresses(typ)
+ return
+ }
+
+ ipv6 := newIP.To4() == nil
+ // Try first to replace an existing address with same type
+ for i, addr := range n.IPAddresses {
+ if addr.Type != typ {
+ continue
+ }
+ if ipv6 != (addr.IP.To4() == nil) {
+ // Don't replace if address family is different.
+ continue
+ }
+ n.IPAddresses[i] = newAddr
+ return
+ }
+ n.IPAddresses = append(n.IPAddresses, newAddr)
+
+}
+
+func (n *Node) GetIPByType(addrType addressing.AddressType, ipv6 bool) net.IP {
+ for _, addr := range n.IPAddresses {
+ if addr.Type != addrType {
+ continue
+ }
+ if is4 := addr.IP.To4() != nil; (!ipv6 && is4) || (ipv6 && !is4) {
+ return addr.IP
+ }
+ }
+ return nil
+}
+
+func (n *Node) getPrimaryAddress() *models.NodeAddressing {
+ v4 := n.GetNodeIP(false)
+ v6 := n.GetNodeIP(true)
+
+ var ipv4AllocStr, ipv6AllocStr string
+ if n.IPv4AllocCIDR != nil {
+ ipv4AllocStr = n.IPv4AllocCIDR.String()
+ }
+ if n.IPv6AllocCIDR != nil {
+ ipv6AllocStr = n.IPv6AllocCIDR.String()
+ }
+
+ var v4Str, v6Str string
+ if v4 != nil {
+ v4Str = v4.String()
+ }
+ if v6 != nil {
+ v6Str = v6.String()
+ }
+
+ return &models.NodeAddressing{
+ IPV4: &models.NodeAddressingElement{
+ Enabled: option.Config.EnableIPv4,
+ IP: v4Str,
+ AllocRange: ipv4AllocStr,
+ },
+ IPV6: &models.NodeAddressingElement{
+ Enabled: option.Config.EnableIPv6,
+ IP: v6Str,
+ AllocRange: ipv6AllocStr,
+ },
+ }
+}
+
+func (n *Node) isPrimaryAddress(addr Address, ipv4 bool) bool {
+ return addr.IP.String() == n.GetNodeIP(!ipv4).String()
+}
+
+func (n *Node) getSecondaryAddresses() []*models.NodeAddressingElement {
+ result := []*models.NodeAddressingElement{}
+
+ for _, addr := range n.IPAddresses {
+ ipv4 := false
+ if addr.IP.To4() != nil {
+ ipv4 = true
+ }
+ if !n.isPrimaryAddress(addr, ipv4) {
+ result = append(result, &models.NodeAddressingElement{
+ IP: addr.IP.String(),
+ })
+ }
+ }
+
+ return result
+}
+
+func (n *Node) getHealthAddresses() *models.NodeAddressing {
+ if n.IPv4HealthIP == nil && n.IPv6HealthIP == nil {
+ return nil
+ }
+
+ var v4Str, v6Str string
+ if n.IPv4HealthIP != nil {
+ v4Str = n.IPv4HealthIP.String()
+ }
+ if n.IPv6HealthIP != nil {
+ v6Str = n.IPv6HealthIP.String()
+ }
+
+ return &models.NodeAddressing{
+ IPV4: &models.NodeAddressingElement{
+ Enabled: option.Config.EnableIPv4,
+ IP: v4Str,
+ },
+ IPV6: &models.NodeAddressingElement{
+ Enabled: option.Config.EnableIPv6,
+ IP: v6Str,
+ },
+ }
+}
+
+func (n *Node) getIngressAddresses() *models.NodeAddressing {
+ if n.IPv4IngressIP == nil && n.IPv6IngressIP == nil {
+ return nil
+ }
+
+ var v4Str, v6Str string
+ if n.IPv4IngressIP != nil {
+ v4Str = n.IPv4IngressIP.String()
+ }
+ if n.IPv6IngressIP != nil {
+ v6Str = n.IPv6IngressIP.String()
+ }
+
+ return &models.NodeAddressing{
+ IPV4: &models.NodeAddressingElement{
+ Enabled: option.Config.EnableIPv4,
+ IP: v4Str,
+ },
+ IPV6: &models.NodeAddressingElement{
+ Enabled: option.Config.EnableIPv6,
+ IP: v6Str,
+ },
+ }
+}
+
+// GetModel returns the API model representation of a node.
+func (n *Node) GetModel() *models.NodeElement {
+ return &models.NodeElement{
+ Name: n.Fullname(),
+ PrimaryAddress: n.getPrimaryAddress(),
+ SecondaryAddresses: n.getSecondaryAddresses(),
+ HealthEndpointAddress: n.getHealthAddresses(),
+ IngressAddress: n.getIngressAddresses(),
+ }
+}
+
+// Identity returns the identity of the node
+func (n *Node) Identity() Identity {
+ return Identity{
+ Name: n.Name,
+ Cluster: n.Cluster,
+ }
+}
+
+func getCluster() string {
+ return option.Config.ClusterName
+}
+
+// IsLocal returns true if this is the node on which the agent itself is
+// running on
+func (n *Node) IsLocal() bool {
+ return n != nil && n.Name == GetName() && n.Cluster == getCluster()
+}
+
+func (n *Node) GetIPv4AllocCIDRs() []*cidr.CIDR {
+ result := make([]*cidr.CIDR, 0, len(n.IPv4SecondaryAllocCIDRs)+1)
+ if n.IPv4AllocCIDR != nil {
+ result = append(result, n.IPv4AllocCIDR)
+ }
+ if len(n.IPv4SecondaryAllocCIDRs) > 0 {
+ result = append(result, n.IPv4SecondaryAllocCIDRs...)
+ }
+ return result
+}
+
+func (n *Node) GetIPv6AllocCIDRs() []*cidr.CIDR {
+ result := make([]*cidr.CIDR, 0, len(n.IPv6SecondaryAllocCIDRs)+1)
+ if n.IPv6AllocCIDR != nil {
+ result = append(result, n.IPv6AllocCIDR)
+ }
+ if len(n.IPv6SecondaryAllocCIDRs) > 0 {
+ result = append(result, n.IPv6SecondaryAllocCIDRs...)
+ }
+ return result
+}
+
+// GetKeyNodeName constructs the API name for the given cluster and node name.
+func GetKeyNodeName(cluster, node string) string {
+ // WARNING - STABLE API: Changing the structure of the key may break
+ // backwards compatibility
+ return path.Join(cluster, node)
+}
+
+// GetKeyName returns the kvstore key to be used for the node
+func (n *Node) GetKeyName() string {
+ return GetKeyNodeName(n.Cluster, n.Name)
+}
+
+// DeepKeyCopy creates a deep copy of the LocalKey
+func (n *Node) DeepKeyCopy() store.LocalKey {
+ return n.DeepCopy()
+}
+
+// Marshal returns the node object as JSON byte slice
+func (n *Node) Marshal() ([]byte, error) {
+ return json.Marshal(n)
+}
+
+// Unmarshal parses the JSON byte slice and updates the node receiver
+func (n *Node) Unmarshal(_ string, data []byte) error {
+ newNode := Node{}
+ if err := json.Unmarshal(data, &newNode); err != nil {
+ return err
+ }
+
+ if err := newNode.validate(); err != nil {
+ return err
+ }
+
+ *n = newNode
+
+ return nil
+}
+
+func (n *Node) validate() error {
+ // Skip the ClusterID check if it matches the local one, as we assume that
+ // it has already been validated, and to allow it to be zero.
+ if n.ClusterID != option.Config.ClusterID {
+ if err := cmtypes.ValidateClusterID(n.ClusterID); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/node/types/nodename.go b/vendor/github.com/cilium/cilium/pkg/node/types/nodename.go
new file mode 100644
index 000000000..8faa30c3e
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/node/types/nodename.go
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package types
+
+import (
+ "os"
+
+ "github.com/cilium/cilium/pkg/defaults"
+ k8sConsts "github.com/cilium/cilium/pkg/k8s/constants"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+ "github.com/cilium/cilium/pkg/option"
+)
+
+var (
+ nodeName = "localhost"
+)
+
+// SetName sets the name of the local node. This will overwrite the value that
+// is automatically retrieved with `os.Hostname()`.
+//
+// Note: This function is currently designed to only be called during the
+// bootstrapping procedure of the agent where no parallelism exists. If you
+// want to use this function in later stages, a mutex must be added first.
+func SetName(name string) {
+ nodeName = name
+}
+
+// GetName returns the name of the local node. The value returned was either
+// previously set with SetName(), retrieved via `os.Hostname()`, or as a last
+// resort is hardcoded to "localhost".
+func GetName() string {
+ return nodeName
+}
+
+// GetAbsoluteNodeName returns the absolute node name combined of both
+// (prefixed) cluster name and the local node name in case of
+// clustered environments otherwise returns the name of the local node.
+func GetAbsoluteNodeName() string {
+ if clusterName := GetClusterName(); clusterName != "" {
+ return clusterName + "/" + nodeName
+ } else {
+ return nodeName
+ }
+}
+
+func GetClusterName() string {
+ if option.Config.ClusterName != "" &&
+ option.Config.ClusterName != defaults.ClusterName {
+ return option.Config.ClusterName
+ } else {
+ return ""
+ }
+}
+
+func init() {
+ // Give priority to the environment variable available in the Cilium agent
+ if name := os.Getenv(k8sConsts.EnvNodeNameSpec); name != "" {
+ nodeName = name
+ return
+ }
+ if h, err := os.Hostname(); err != nil {
+ log.WithError(err).Warn("Unable to retrieve local hostname")
+ } else {
+ log.WithField(logfields.NodeName, h).Debug("os.Hostname() returned")
+ nodeName = h
+ }
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/node/types/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/node/types/zz_generated.deepcopy.go
new file mode 100644
index 000000000..6f78dd8c5
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/node/types/zz_generated.deepcopy.go
@@ -0,0 +1,138 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package types
+
+import (
+ net "net"
+
+ cidr "github.com/cilium/cilium/pkg/cidr"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Address) DeepCopyInto(out *Address) {
+ *out = *in
+ if in.IP != nil {
+ in, out := &in.IP, &out.IP
+ *out = make(net.IP, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Address.
+func (in *Address) DeepCopy() *Address {
+ if in == nil {
+ return nil
+ }
+ out := new(Address)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Node) DeepCopyInto(out *Node) {
+ *out = *in
+ if in.IPAddresses != nil {
+ in, out := &in.IPAddresses, &out.IPAddresses
+ *out = make([]Address, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.IPv4AllocCIDR != nil {
+ in, out := &in.IPv4AllocCIDR, &out.IPv4AllocCIDR
+ *out = (*in).DeepCopy()
+ }
+ if in.IPv4SecondaryAllocCIDRs != nil {
+ in, out := &in.IPv4SecondaryAllocCIDRs, &out.IPv4SecondaryAllocCIDRs
+ *out = make([]*cidr.CIDR, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = (*in).DeepCopy()
+ }
+ }
+ }
+ if in.IPv6AllocCIDR != nil {
+ in, out := &in.IPv6AllocCIDR, &out.IPv6AllocCIDR
+ *out = (*in).DeepCopy()
+ }
+ if in.IPv6SecondaryAllocCIDRs != nil {
+ in, out := &in.IPv6SecondaryAllocCIDRs, &out.IPv6SecondaryAllocCIDRs
+ *out = make([]*cidr.CIDR, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = (*in).DeepCopy()
+ }
+ }
+ }
+ if in.IPv4HealthIP != nil {
+ in, out := &in.IPv4HealthIP, &out.IPv4HealthIP
+ *out = make(net.IP, len(*in))
+ copy(*out, *in)
+ }
+ if in.IPv6HealthIP != nil {
+ in, out := &in.IPv6HealthIP, &out.IPv6HealthIP
+ *out = make(net.IP, len(*in))
+ copy(*out, *in)
+ }
+ if in.IPv4IngressIP != nil {
+ in, out := &in.IPv4IngressIP, &out.IPv4IngressIP
+ *out = make(net.IP, len(*in))
+ copy(*out, *in)
+ }
+ if in.IPv6IngressIP != nil {
+ in, out := &in.IPv6IngressIP, &out.IPv6IngressIP
+ *out = make(net.IP, len(*in))
+ copy(*out, *in)
+ }
+ if in.Labels != nil {
+ in, out := &in.Labels, &out.Labels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Annotations != nil {
+ in, out := &in.Annotations, &out.Annotations
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node.
+func (in *Node) DeepCopy() *Node {
+ if in == nil {
+ return nil
+ }
+ out := new(Node)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RegisterNode) DeepCopyInto(out *RegisterNode) {
+ *out = *in
+ in.Node.DeepCopyInto(&out.Node)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegisterNode.
+func (in *RegisterNode) DeepCopy() *RegisterNode {
+ if in == nil {
+ return nil
+ }
+ out := new(RegisterNode)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/option/config.go b/vendor/github.com/cilium/cilium/pkg/option/config.go
index 21090d2cb..bd7ebf5e8 100644
--- a/vendor/github.com/cilium/cilium/pkg/option/config.go
+++ b/vendor/github.com/cilium/cilium/pkg/option/config.go
@@ -8,6 +8,7 @@ import (
"encoding/json"
"errors"
"fmt"
+ "io"
"math"
"net"
"net/netip"
@@ -24,8 +25,10 @@ import (
"github.com/spf13/cast"
"github.com/spf13/cobra"
"github.com/spf13/viper"
+ "google.golang.org/protobuf/types/known/fieldmaskpb"
k8sLabels "k8s.io/apimachinery/pkg/labels"
+ flowpb "github.com/cilium/cilium/api/v1/flow"
"github.com/cilium/cilium/api/v1/models"
"github.com/cilium/cilium/pkg/cidr"
clustermeshTypes "github.com/cilium/cilium/pkg/clustermesh/types"
@@ -107,6 +110,9 @@ const (
// ConntrackGCInterval is the name of the ConntrackGCInterval option
ConntrackGCInterval = "conntrack-gc-interval"
+ // ConntrackGCMaxInterval is the name of the ConntrackGCMaxInterval option
+ ConntrackGCMaxInterval = "conntrack-gc-max-interval"
+
// DebugArg is the argument enables debugging mode
DebugArg = "debug"
@@ -133,9 +139,6 @@ const (
// EnableExternalIPs enables implementation of k8s services with externalIPs in datapath
EnableExternalIPs = "enable-external-ips"
- // K8sEnableEndpointSlice enables the k8s EndpointSlice feature into Cilium
- K8sEnableEndpointSlice = "enable-k8s-endpoint-slice"
-
// EnableL7Proxy is the name of the option to enable L7 proxy
EnableL7Proxy = "enable-l7-proxy"
@@ -369,6 +372,9 @@ const (
// EnableBPFMasquerade masquerades packets from endpoints leaving the host with BPF instead of iptables
EnableBPFMasquerade = "enable-bpf-masquerade"
+ // EnableMasqueradeRouteSource masquerades to the source route IP address instead of the interface one
+ EnableMasqueradeRouteSource = "enable-masquerade-to-route-source"
+
// DeriveMasqIPAddrFromDevice is device name which IP addr is used for BPF masquerades
DeriveMasqIPAddrFromDevice = "derive-masquerade-ip-addr-from-device"
@@ -545,12 +551,6 @@ const (
// ciliumEnvPrefix is the prefix used for environment variables
ciliumEnvPrefix = "CILIUM_"
- // ClusterName is the name of the ClusterName option
- ClusterName = "cluster-name"
-
- // ClusterIDName is the name of the ClusterID option
- ClusterIDName = "cluster-id"
-
// CNIChainingMode configures which CNI plugin Cilium is chained with.
CNIChainingMode = "cni-chaining-mode"
@@ -652,6 +652,10 @@ const (
// PolicyMapEntriesName configures max entries for BPF policymap.
PolicyMapEntriesName = "bpf-policy-map-max"
+ // PolicyMapFullReconciliationIntervalName sets the interval for performing the full
+ // reconciliation of the endpoint policy map.
+ PolicyMapFullReconciliationIntervalName = "bpf-policy-map-full-reconciliation-interval"
+
// SockRevNatEntriesName configures max entries for BPF sock reverse nat
// entries.
SockRevNatEntriesName = "bpf-sock-rev-map-max"
@@ -756,7 +760,7 @@ const (
// IPSecKeyFileName is the name of the option for ipsec key file
IPSecKeyFileName = "ipsec-key-file"
- // EnableWireguard is the name of the option to enable wireguard
+ // EnableWireguard is the name of the option to enable WireGuard
EnableWireguard = "enable-wireguard"
// EnableL2Announcements is the name of the option to enable l2 announcements
@@ -771,9 +775,23 @@ const (
// L2AnnouncerRetryPeriod, on renew failure, retry after X amount of time.
L2AnnouncerRetryPeriod = "l2-announcements-retry-period"
- // EnableWireguardUserspaceFallback is the name of the option that enables the fallback to wireguard userspace mode
+ // EnableEncryptionStrictMode is the name of the option to enable strict encryption mode.
+ EnableEncryptionStrictMode = "enable-encryption-strict-mode"
+
+ // EncryptionStrictModeCIDR is the CIDR in which the strict encryption mode should be enforced.
+ EncryptionStrictModeCIDR = "encryption-strict-mode-cidr"
+
+ // EncryptionStrictModeAllowRemoteNodeIdentities allows dynamic lookup of remote node identities.
+ // This is required when tunneling is used
+ // or direct routing is used and the node CIDR and pod CIDR overlap.
+ EncryptionStrictModeAllowRemoteNodeIdentities = "encryption-strict-mode-allow-remote-node-identities"
+
+ // EnableWireguardUserspaceFallback is the name of the option that enables the fallback to WireGuard userspace mode
EnableWireguardUserspaceFallback = "enable-wireguard-userspace-fallback"
+ // WireguardPersistentKeepalive controls the WireGuard PersistentKeepalive option. Set 0 to disable.
+ WireguardPersistentKeepalive = "wireguard-persistent-keepalive"
+
// NodeEncryptionOptOutLabels is the name of the option for the node-to-node encryption opt-out labels
NodeEncryptionOptOutLabels = "node-encryption-opt-out-labels"
@@ -811,6 +829,9 @@ const (
// EnableHealthCheckNodePort is the name of the EnableHealthCheckNodePort option
EnableHealthCheckNodePort = "enable-health-check-nodeport"
+ // EnableHealthCheckLoadBalancerIP is the name of the EnableHealthCheckLoadBalancerIP option
+ EnableHealthCheckLoadBalancerIP = "enable-health-check-loadbalancer-ip"
+
// PolicyQueueSize is the size of the queues utilized by the policy
// repository.
PolicyQueueSize = "policy-queue-size"
@@ -891,9 +912,9 @@ const (
// IPv6NativeRoutingCIDR describes a v6 CIDR in which pod IPs are routable
IPv6NativeRoutingCIDR = "ipv6-native-routing-cidr"
- // EgressMasqueradeInterfaces is the selector used to select interfaces
- // subject to egress masquerading
- EgressMasqueradeInterfaces = "egress-masquerade-interfaces"
+ // MasqueradeInterfaces is the selector used to select interfaces subject to
+ // egress masquerading
+ MasqueradeInterfaces = "egress-masquerade-interfaces"
// PolicyTriggerInterval is the amount of time between triggers of policy
// updates are invoked.
@@ -986,6 +1007,15 @@ const (
// HubbleExportFileCompress specifies whether rotated files are compressed.
HubbleExportFileCompress = "hubble-export-file-compress"
+ // HubbleExportAllowlist specifies allow list filter use by exporter.
+ HubbleExportAllowlist = "hubble-export-allowlist"
+
+ // HubbleExportDenylist specifies deny list filter use by exporter.
+ HubbleExportDenylist = "hubble-export-denylist"
+
+ // HubbleExportFieldmask specifies list of fields to log in exporter.
+ HubbleExportFieldmask = "hubble-export-fieldmask"
+
// EnableHubbleRecorderAPI specifies if the Hubble Recorder API should be served
EnableHubbleRecorderAPI = "enable-hubble-recorder-api"
@@ -1006,9 +1036,14 @@ const (
// By default, Hubble observes all monitor events.
HubbleMonitorEvents = "hubble-monitor-events"
- // HubbleRedact controls which values Hubble will redact in network flows.
- // By default, Hubble does not redact any values.
- HubbleRedact = "hubble-redact"
+ // HubbleRedactEnabled controls if sensitive information will be redacted from L7 flows
+ HubbleRedactEnabled = "hubble-redact-enabled"
+
+ // HubbleRedactHttpURLQuery controls if the URL query will be redacted from flows
+ HubbleRedactHttpURLQuery = "hubble-redact-http-urlquery"
+
+ // HubbleRedactKafkaApiKey controls if the Kafka API key will be redacted from flows
+ HubbleRedactKafkaApiKey = "hubble-redact-kafka-apikey"
// DisableIptablesFeederRules specifies which chains will be excluded
// when installing the feeder rules
@@ -1068,13 +1103,6 @@ const (
// LBMaglevMapMaxEntries configures max entries of bpf map for Maglev.
LBMaglevMapMaxEntries = "bpf-lb-maglev-map-max"
- // K8sServiceProxyName instructs Cilium to handle service objects only when
- // service.kubernetes.io/service-proxy-name label equals the provided value.
- K8sServiceProxyName = "k8s-service-proxy-name"
-
- // APIRateLimitName enables configuration of the API rate limits
- APIRateLimitName = "api-rate-limit"
-
// CRDWaitTimeout is the timeout in which Cilium will exit if CRDs are not
// available.
CRDWaitTimeout = "crd-wait-timeout"
@@ -1098,6 +1126,9 @@ const (
// compatible with MetalLB's configuration.
BGPConfigPath = "bgp-config-path"
+ // BGPSecretsNamespace is the Kubernetes namespace to get BGP control plane secrets from.
+ BGPSecretsNamespace = "bgp-secrets-namespace"
+
// ExternalClusterIPName is the name of the option to enable
// cluster external access to ClusterIP services.
ExternalClusterIPName = "bpf-lb-external-clusterip"
@@ -1173,6 +1204,9 @@ const (
// EnableK8sNetworkPolicy enables support for K8s NetworkPolicy.
EnableK8sNetworkPolicy = "enable-k8s-networkpolicy"
+
+ // PolicyCIDRMatchMode defines the entities that CIDR selectors can reach
+ PolicyCIDRMatchMode = "policy-cidr-match-mode"
)
// Default string arguments
@@ -1551,6 +1585,10 @@ type DaemonConfig struct {
// endpoint may allow traffic to exchange traffic with.
PolicyMapEntries int
+ // PolicyMapFullReconciliationInterval is the interval at which to perform
+ // the full reconciliation of the endpoint policy map.
+ PolicyMapFullReconciliationInterval time.Duration
+
// SockRevNatEntries is the maximum number of sock rev nat mappings
// allowed in the BPF rev nat table
SockRevNatEntries int
@@ -1701,9 +1739,23 @@ type DaemonConfig struct {
// EnableWireguard enables Wireguard encryption
EnableWireguard bool
+ // EnableEncryptionStrictMode enables strict mode for encryption
+ EnableEncryptionStrictMode bool
+
+ // EncryptionStrictModeCIDR is the CIDR to use for strict mode
+ EncryptionStrictModeCIDR netip.Prefix
+
+ // EncryptionStrictModeAllowRemoteNodeIdentities allows dynamic lookup of node identities.
+ // This is required when tunneling is used
+ // or direct routing is used and the node CIDR and pod CIDR overlap.
+ EncryptionStrictModeAllowRemoteNodeIdentities bool
+
// EnableWireguardUserspaceFallback enables the fallback to the userspace implementation
EnableWireguardUserspaceFallback bool
+ // WireguardPersistentKeepalive controls the WireGuard PersistentKeepalive option.
+ WireguardPersistentKeepalive time.Duration
+
// EnableL2Announcements enables L2 announcement of service IPs
EnableL2Announcements bool
@@ -1763,29 +1815,31 @@ type DaemonConfig struct {
// Masquerade specifies whether or not to masquerade packets from endpoints
// leaving the host.
- EnableIPv4Masquerade bool
- EnableIPv6Masquerade bool
- EnableBPFMasquerade bool
- DeriveMasqIPAddrFromDevice string
- EnableBPFClockProbe bool
- EnableIPMasqAgent bool
- EnableIPv4EgressGateway bool
- EnableEnvoyConfig bool
- EnableIngressController bool
- EnableGatewayAPI bool
- EnvoyConfigTimeout time.Duration
- IPMasqAgentConfigPath string
- InstallIptRules bool
- MonitorAggregation string
- PreAllocateMaps bool
- IPv6NodeAddr string
- IPv4NodeAddr string
- SidecarIstioProxyImage string
- SocketPath string
- TracePayloadlen int
- Version string
- PrometheusServeAddr string
- ToFQDNsMinTTL int
+ EnableIPv4Masquerade bool
+ EnableIPv6Masquerade bool
+ EnableBPFMasquerade bool
+ EnableMasqueradeRouteSource bool
+ EnableIPMasqAgent bool
+ DeriveMasqIPAddrFromDevice string
+ IPMasqAgentConfigPath string
+
+ EnableBPFClockProbe bool
+ EnableIPv4EgressGateway bool
+ EnableEnvoyConfig bool
+ EnableIngressController bool
+ EnableGatewayAPI bool
+ EnvoyConfigTimeout time.Duration
+ InstallIptRules bool
+ MonitorAggregation string
+ PreAllocateMaps bool
+ IPv6NodeAddr string
+ IPv4NodeAddr string
+ SidecarIstioProxyImage string
+ SocketPath string
+ TracePayloadlen int
+ Version string
+ PrometheusServeAddr string
+ ToFQDNsMinTTL int
// DNSMaxIPsPerRestoredRule defines the maximum number of IPs to maintain
// for each FQDN selector in endpoint's restored DNS rules
@@ -1878,6 +1932,10 @@ type DaemonConfig struct {
// cilium
EnableHealthCheckNodePort bool
+ // EnableHealthCheckLoadBalancerIP enables health checking of LoadBalancerIP
+ // by cilium
+ EnableHealthCheckLoadBalancerIP bool
+
// KVstoreKeepAliveInterval is the interval in which the lease is being
// renewed. This must be set to a value lesser than the LeaseTTL ideally
// by a factor of 3.
@@ -1928,6 +1986,10 @@ type DaemonConfig struct {
// interval
ConntrackGCInterval time.Duration
+ // ConntrackGCMaxInterval if set limits the automatic GC interval calculation to
+ // the specified maximum value.
+ ConntrackGCMaxInterval time.Duration
+
// K8sEventHandover enables use of the kvstore to optimize Kubernetes
// event handling by listening for k8s events in the operator and
// mirroring it into the kvstore for reduced overhead in large
@@ -2052,10 +2114,6 @@ type DaemonConfig struct {
// EnableLocalRedirectPolicy enables redirect policies to redirect traffic within nodes
EnableLocalRedirectPolicy bool
- // K8sEnableEndpointSlice enables k8s endpoint slice feature that is used
- // in kubernetes.
- K8sEnableK8sEndpointSlice bool
-
// NodePortMin is the minimum port address for the NodePort range
NodePortMin int
@@ -2109,8 +2167,11 @@ type DaemonConfig struct {
// IPv6NativeRoutingCIDR describes a CIDR in which pod IPs are routable
IPv6NativeRoutingCIDR *cidr.CIDR
- // EgressMasqueradeInterfaces is the selector used to select interfaces
- // subject to egress masquerading
+ // MasqueradeInterfaces is the selector used to select interfaces subject
+ // to egress masquerading. EgressMasqueradeInterfaces is the same but as
+ // a string representation. It's deprecated and can be removed once the GH
+ // issue https://github.com/cilium/cilium-cli/issues/1896 is fixed.
+ MasqueradeInterfaces []string
EgressMasqueradeInterfaces string
// PolicyTriggerInterval is the amount of time between when policy updates
@@ -2200,6 +2261,15 @@ type DaemonConfig struct {
// HubbleExportFileCompress specifies whether rotated files are compressed.
HubbleExportFileCompress bool
+ // HubbleExportAllowlist specifies allow list filter use by exporter.
+ HubbleExportAllowlist []*flowpb.FlowFilter
+
+ // HubbleExportDenylist specifies deny list filter use by exporter.
+ HubbleExportDenylist []*flowpb.FlowFilter
+
+ // HubbleExportFieldmask specifies list of fields to log in exporter.
+ HubbleExportFieldmask []string
+
// EnableHubbleRecorderAPI specifies if the Hubble Recorder API should be served
EnableHubbleRecorderAPI bool
@@ -2220,9 +2290,14 @@ type DaemonConfig struct {
// By default, Hubble observes all monitor events.
HubbleMonitorEvents []string
- // HubbleRedact controls which values Hubble will redact in network flows.
- // By default, Hubble does not redact any values.
- HubbleRedact []string
+ // HubbleRedactEnabled controls if Hubble will be redacting sensitive information from L7 flows
+ HubbleRedactEnabled bool
+
+ // HubbleRedactHttpURLQuery controls if the URL query will be redacted from flows
+ HubbleRedactHttpURLQuery bool
+
+ // HubbleRedactKafkaApiKey controls if Kafka API key will be redacted from flows
+ HubbleRedactKafkaApiKey bool
// EndpointStatus enables population of information in the
// CiliumEndpoint.Status resource
@@ -2284,16 +2359,6 @@ type DaemonConfig struct {
// LBMaglevMapEntries is the maximum number of entries allowed in BPF lbmap for maglev.
LBMaglevMapEntries int
- // K8sServiceProxyName is the value of service.kubernetes.io/service-proxy-name label,
- // that identifies the service objects Cilium should handle.
- // If the provided value is an empty string, Cilium will manage service objects when
- // the label is not present. For more details -
- // https://github.com/kubernetes/enhancements/tree/master/keps/sig-network/2447-Make-kube-proxy-service-abstraction-optional
- K8sServiceProxyName string
-
- // APIRateLimitName enables configuration of the API rate limits
- APIRateLimit map[string]string
-
// CRDWaitTimeout is the timeout in which Cilium will exit if CRDs are not
// available.
CRDWaitTimeout time.Duration
@@ -2321,6 +2386,9 @@ type DaemonConfig struct {
// compatible with MetalLB's configuration.
BGPConfigPath string
+ // BGPSecretsNamespace is the Kubernetes namespace to get BGP control plane secrets from.
+ BGPSecretsNamespace string
+
// ExternalClusterIP enables routing to ClusterIP services from outside
// the cluster. This mirrors the behaviour of kube-proxy.
ExternalClusterIP bool
@@ -2396,56 +2464,62 @@ type DaemonConfig struct {
// EnableK8sNetworkPolicy enables support for K8s NetworkPolicy.
EnableK8sNetworkPolicy bool
+
+ // PolicyCIDRMatchMode is the list of entities that can be selected by CIDR policy.
+ // Currently supported values:
+ // - world
+ // - world, remote-node
+ PolicyCIDRMatchMode []string
}
var (
// Config represents the daemon configuration
Config = &DaemonConfig{
- CreationTime: time.Now(),
- Opts: NewIntOptions(&DaemonOptionLibrary),
- Monitor: &models.MonitorStatus{Cpus: int64(runtime.NumCPU()), Npages: 64, Pagesize: int64(os.Getpagesize()), Lost: 0, Unknown: 0},
- IPv6ClusterAllocCIDR: defaults.IPv6ClusterAllocCIDR,
- IPv6ClusterAllocCIDRBase: defaults.IPv6ClusterAllocCIDRBase,
- EnableHostIPRestore: defaults.EnableHostIPRestore,
- EnableHealthChecking: defaults.EnableHealthChecking,
- EnableEndpointHealthChecking: defaults.EnableEndpointHealthChecking,
- EnableHealthCheckNodePort: defaults.EnableHealthCheckNodePort,
- EnableIPv4: defaults.EnableIPv4,
- EnableIPv6: defaults.EnableIPv6,
- EnableIPv6NDP: defaults.EnableIPv6NDP,
- EnableSCTP: defaults.EnableSCTP,
- EnableL7Proxy: defaults.EnableL7Proxy,
- EndpointStatus: make(map[string]struct{}),
- DNSMaxIPsPerRestoredRule: defaults.DNSMaxIPsPerRestoredRule,
- ToFQDNsMaxIPsPerHost: defaults.ToFQDNsMaxIPsPerHost,
- KVstorePeriodicSync: defaults.KVstorePeriodicSync,
- KVstoreConnectivityTimeout: defaults.KVstoreConnectivityTimeout,
- IPAllocationTimeout: defaults.IPAllocationTimeout,
- IdentityChangeGracePeriod: defaults.IdentityChangeGracePeriod,
- IdentityRestoreGracePeriod: defaults.IdentityRestoreGracePeriod,
- FixedIdentityMapping: make(map[string]string),
- KVStoreOpt: make(map[string]string),
- LogOpt: make(map[string]string),
- LoopbackIPv4: defaults.LoopbackIPv4,
- EnableEndpointRoutes: defaults.EnableEndpointRoutes,
- AnnotateK8sNode: defaults.AnnotateK8sNode,
- K8sServiceCacheSize: defaults.K8sServiceCacheSize,
- AutoCreateCiliumNodeResource: defaults.AutoCreateCiliumNodeResource,
- IdentityAllocationMode: IdentityAllocationModeKVstore,
- AllowICMPFragNeeded: defaults.AllowICMPFragNeeded,
- EnableWellKnownIdentities: defaults.EnableWellKnownIdentities,
- K8sEnableK8sEndpointSlice: defaults.K8sEnableEndpointSlice,
- AllocatorListTimeout: defaults.AllocatorListTimeout,
- EnableICMPRules: defaults.EnableICMPRules,
- UseCiliumInternalIPForIPsec: defaults.UseCiliumInternalIPForIPsec,
+ CreationTime: time.Now(),
+ Opts: NewIntOptions(&DaemonOptionLibrary),
+ Monitor: &models.MonitorStatus{Cpus: int64(runtime.NumCPU()), Npages: 64, Pagesize: int64(os.Getpagesize()), Lost: 0, Unknown: 0},
+ IPv6ClusterAllocCIDR: defaults.IPv6ClusterAllocCIDR,
+ IPv6ClusterAllocCIDRBase: defaults.IPv6ClusterAllocCIDRBase,
+ EnableHostIPRestore: defaults.EnableHostIPRestore,
+ EnableHealthChecking: defaults.EnableHealthChecking,
+ EnableEndpointHealthChecking: defaults.EnableEndpointHealthChecking,
+ EnableHealthCheckLoadBalancerIP: defaults.EnableHealthCheckLoadBalancerIP,
+ EnableHealthCheckNodePort: defaults.EnableHealthCheckNodePort,
+ EnableIPv4: defaults.EnableIPv4,
+ EnableIPv6: defaults.EnableIPv6,
+ EnableIPv6NDP: defaults.EnableIPv6NDP,
+ EnableSCTP: defaults.EnableSCTP,
+ EnableL7Proxy: defaults.EnableL7Proxy,
+ EndpointStatus: make(map[string]struct{}),
+ DNSMaxIPsPerRestoredRule: defaults.DNSMaxIPsPerRestoredRule,
+ ToFQDNsMaxIPsPerHost: defaults.ToFQDNsMaxIPsPerHost,
+ KVstorePeriodicSync: defaults.KVstorePeriodicSync,
+ KVstoreConnectivityTimeout: defaults.KVstoreConnectivityTimeout,
+ IPAllocationTimeout: defaults.IPAllocationTimeout,
+ IdentityChangeGracePeriod: defaults.IdentityChangeGracePeriod,
+ IdentityRestoreGracePeriod: defaults.IdentityRestoreGracePeriod,
+ FixedIdentityMapping: make(map[string]string),
+ KVStoreOpt: make(map[string]string),
+ LogOpt: make(map[string]string),
+ LoopbackIPv4: defaults.LoopbackIPv4,
+ EnableEndpointRoutes: defaults.EnableEndpointRoutes,
+ AnnotateK8sNode: defaults.AnnotateK8sNode,
+ K8sServiceCacheSize: defaults.K8sServiceCacheSize,
+ AutoCreateCiliumNodeResource: defaults.AutoCreateCiliumNodeResource,
+ IdentityAllocationMode: IdentityAllocationModeKVstore,
+ AllowICMPFragNeeded: defaults.AllowICMPFragNeeded,
+ EnableWellKnownIdentities: defaults.EnableWellKnownIdentities,
+ AllocatorListTimeout: defaults.AllocatorListTimeout,
+ EnableICMPRules: defaults.EnableICMPRules,
+ UseCiliumInternalIPForIPsec: defaults.UseCiliumInternalIPForIPsec,
K8sEnableLeasesFallbackDiscovery: defaults.K8sEnableLeasesFallbackDiscovery,
- APIRateLimit: make(map[string]string),
ExternalClusterIP: defaults.ExternalClusterIP,
EnableVTEP: defaults.EnableVTEP,
EnableBGPControlPlane: defaults.EnableBGPControlPlane,
EnableK8sNetworkPolicy: defaults.EnableK8sNetworkPolicy,
+ PolicyCIDRMatchMode: defaults.PolicyCIDRMatchMode,
}
)
@@ -2567,7 +2641,7 @@ func (c *DaemonConfig) TunnelDevice() string {
// takes care of the MTU overhead. So no need to take it into account here.
// See encap_geneve_dsr_opt[4,6] in nodeport.h
func (c *DaemonConfig) TunnelExists() bool {
- return c.TunnelingEnabled() || c.EnableIPv4EgressGateway || c.EnableHighScaleIPcache
+ return c.TunnelingEnabled() || c.EgressGatewayCommonEnabled() || c.EnableHighScaleIPcache
}
// AreDevicesRequired returns true if the agent needs to attach to the native
@@ -2670,23 +2744,6 @@ func (c *DaemonConfig) EndpointStatusIsEnabled(option string) bool {
return ok
}
-// LocalClusterName returns the name of the cluster Cilium is deployed in
-func (c *DaemonConfig) LocalClusterName() string {
- return c.ClusterName
-}
-
-// LocalClusterID returns the ID of the cluster local to the Cilium agent.
-func (c *DaemonConfig) LocalClusterID() uint32 {
- return c.ClusterID
-}
-
-// K8sServiceProxyName returns the required value for the
-// service.kubernetes.io/service-proxy-name label in order for services to be
-// handled.
-func (c *DaemonConfig) K8sServiceProxyNameValue() string {
- return c.K8sServiceProxyName
-}
-
// CiliumNamespaceName returns the name of the namespace in which Cilium is
// deployed in
func (c *DaemonConfig) CiliumNamespaceName() string {
@@ -2718,6 +2775,34 @@ func (c *DaemonConfig) K8sGatewayAPIEnabled() bool {
return c.EnableGatewayAPI
}
+// EgressGatewayCommonEnabled returns true if at least one egress gateway implementation
+// is enabled.
+func (c *DaemonConfig) EgressGatewayCommonEnabled() bool {
+ return c.EnableIPv4EgressGateway
+}
+
+func (c *DaemonConfig) PolicyCIDRMatchesNodes() bool {
+ for _, mode := range c.PolicyCIDRMatchMode {
+ if mode == "nodes" {
+ return true
+ }
+ }
+ return false
+}
+
+func (c *DaemonConfig) validatePolicyCIDRMatchMode() error {
+ // Currently, the only acceptable value is "nodes".
+ for _, mode := range c.PolicyCIDRMatchMode {
+ switch mode {
+ case "nodes":
+ continue
+ default:
+ return fmt.Errorf("unknown CIDR match mode: %s", mode)
+ }
+ }
+ return nil
+}
+
// DirectRoutingDeviceRequired return whether the Direct Routing Device is needed under
// the current configuration.
func (c *DaemonConfig) DirectRoutingDeviceRequired() bool {
@@ -2730,7 +2815,7 @@ func (c *DaemonConfig) DirectRoutingDeviceRequired() bool {
return true
}
- return (c.EnableNodePort || BPFHostRoutingEnabled || Config.EnableWireguard) && !c.TunnelingEnabled()
+ return c.EnableNodePort || BPFHostRoutingEnabled || Config.EnableWireguard
}
func (c *DaemonConfig) validateIPv6ClusterAllocCIDR() error {
@@ -2812,16 +2897,12 @@ func (c *DaemonConfig) Validate(vp *viper.Viper) error {
SingleClusterRouteName, RoutingMode, RoutingModeNative)
}
- if c.ClusterID < clustermeshTypes.ClusterIDMin || c.ClusterID > clustermeshTypes.ClusterIDMax {
- return fmt.Errorf("invalid cluster id %d: must be in range %d..%d",
- c.ClusterID, clustermeshTypes.ClusterIDMin, clustermeshTypes.ClusterIDMax)
+ cinfo := clustermeshTypes.ClusterInfo{
+ ID: c.ClusterID,
+ Name: c.ClusterName,
}
-
- if c.ClusterID != 0 {
- if c.ClusterName == defaults.ClusterName {
- return fmt.Errorf("cannot use default cluster name (%s) with option %s",
- defaults.ClusterName, ClusterIDName)
- }
+ if err := cinfo.Validate(); err != nil {
+ return err
}
if err := c.checkMapSizeLimits(); err != nil {
@@ -2861,6 +2942,10 @@ func (c *DaemonConfig) Validate(vp *viper.Viper) error {
}
}
+ if err := c.validatePolicyCIDRMatchMode(); err != nil {
+ return err
+ }
+
return nil
}
@@ -2968,8 +3053,8 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) {
c.AutoCreateCiliumNodeResource = vp.GetBool(AutoCreateCiliumNodeResource)
c.BPFRoot = vp.GetString(BPFRoot)
c.CGroupRoot = vp.GetString(CGroupRoot)
- c.ClusterID = vp.GetUint32(ClusterIDName)
- c.ClusterName = vp.GetString(ClusterName)
+ c.ClusterID = vp.GetUint32(clustermeshTypes.OptClusterID)
+ c.ClusterName = vp.GetString(clustermeshTypes.OptClusterName)
c.DatapathMode = vp.GetString(DatapathMode)
c.Debug = vp.GetBool(DebugArg)
c.DebugVerbose = vp.GetStringSlice(DebugVerbose)
@@ -2991,10 +3076,12 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) {
c.L2AnnouncerRenewDeadline = vp.GetDuration(L2AnnouncerRenewDeadline)
c.L2AnnouncerRetryPeriod = vp.GetDuration(L2AnnouncerRetryPeriod)
c.EnableWireguardUserspaceFallback = vp.GetBool(EnableWireguardUserspaceFallback)
+ c.WireguardPersistentKeepalive = vp.GetDuration(WireguardPersistentKeepalive)
c.EnableWellKnownIdentities = vp.GetBool(EnableWellKnownIdentities)
c.EnableXDPPrefilter = vp.GetBool(EnableXDPPrefilter)
c.DisableCiliumEndpointCRD = vp.GetBool(DisableCiliumEndpointCRDName)
- c.EgressMasqueradeInterfaces = vp.GetString(EgressMasqueradeInterfaces)
+ c.MasqueradeInterfaces = vp.GetStringSlice(MasqueradeInterfaces)
+ c.EgressMasqueradeInterfaces = strings.Join(c.MasqueradeInterfaces, ",")
c.BPFSocketLBHostnsOnly = vp.GetBool(BPFSocketLBHostnsOnly)
c.EnableSocketLB = vp.GetBool(EnableSocketLB)
c.EnableSocketLBTracing = vp.GetBool(EnableSocketLBTracing)
@@ -3006,6 +3093,7 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) {
c.EnableHealthChecking = vp.GetBool(EnableHealthChecking)
c.EnableEndpointHealthChecking = vp.GetBool(EnableEndpointHealthChecking)
c.EnableHealthCheckNodePort = vp.GetBool(EnableHealthCheckNodePort)
+ c.EnableHealthCheckLoadBalancerIP = vp.GetBool(EnableHealthCheckLoadBalancerIP)
c.EnableLocalNodeRoute = vp.GetBool(EnableLocalNodeRoute)
c.EnablePolicy = strings.ToLower(vp.GetString(EnablePolicy))
c.EnableExternalIPs = vp.GetBool(EnableExternalIPs)
@@ -3050,7 +3138,6 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) {
c.IPv6Range = vp.GetString(IPv6Range)
c.IPv6ServiceRange = vp.GetString(IPv6ServiceRange)
c.JoinCluster = vp.GetBool(JoinClusterName)
- c.K8sEnableK8sEndpointSlice = vp.GetBool(K8sEnableEndpointSlice)
c.K8sRequireIPv4PodCIDR = vp.GetBool(K8sRequireIPv4PodCIDRName)
c.K8sRequireIPv6PodCIDR = vp.GetBool(K8sRequireIPv6PodCIDRName)
c.K8sServiceCacheSize = uint(vp.GetInt(K8sServiceCacheSize))
@@ -3121,7 +3208,6 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) {
c.PolicyAuditMode = vp.GetBool(PolicyAuditModeArg)
c.EnableIPv4FragmentsTracking = vp.GetBool(EnableIPv4FragmentsTrackingName)
c.FragmentsMapEntries = vp.GetInt(FragmentsMapEntriesName)
- c.K8sServiceProxyName = vp.GetString(K8sServiceProxyName)
c.CRDWaitTimeout = vp.GetDuration(CRDWaitTimeout)
c.LoadBalancerDSRDispatch = vp.GetString(LoadBalancerDSRDispatch)
c.LoadBalancerDSRL4Xlate = vp.GetString(LoadBalancerDSRL4Xlate)
@@ -3132,12 +3218,14 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) {
c.BGPAnnounceLBIP = vp.GetBool(BGPAnnounceLBIP)
c.BGPAnnouncePodCIDR = vp.GetBool(BGPAnnouncePodCIDR)
c.BGPConfigPath = vp.GetString(BGPConfigPath)
+ c.BGPSecretsNamespace = vp.GetString(BGPSecretsNamespace)
c.ExternalClusterIP = vp.GetBool(ExternalClusterIPName)
c.EnableNat46X64Gateway = vp.GetBool(EnableNat46X64Gateway)
c.EnableHighScaleIPcache = vp.GetBool(EnableHighScaleIPcache)
c.EnableIPv4Masquerade = vp.GetBool(EnableIPv4Masquerade) && c.EnableIPv4
c.EnableIPv6Masquerade = vp.GetBool(EnableIPv6Masquerade) && c.EnableIPv6
c.EnableBPFMasquerade = vp.GetBool(EnableBPFMasquerade)
+ c.EnableMasqueradeRouteSource = vp.GetBool(EnableMasqueradeRouteSource)
c.DeriveMasqIPAddrFromDevice = vp.GetString(DeriveMasqIPAddrFromDevice)
c.EnablePMTUDiscovery = vp.GetBool(EnablePMTUDiscovery)
c.IPv6NAT46x64CIDR = defaults.IPv6NAT46x64CIDR
@@ -3213,6 +3301,26 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) {
}
}
+ encryptionStrictModeEnabled := vp.GetBool(EnableEncryptionStrictMode)
+ if encryptionStrictModeEnabled {
+ if c.EnableIPv6 {
+ log.Warnf("WireGuard encryption strict mode only support IPv4. IPv6 traffic is not protected and can be leaked.")
+ }
+
+ strictCIDR := vp.GetString(EncryptionStrictModeCIDR)
+ c.EncryptionStrictModeCIDR, err = netip.ParsePrefix(strictCIDR)
+ if err != nil {
+ log.WithError(err).Fatalf("Cannot parse CIDR %s from --%s option", strictCIDR, EncryptionStrictModeCIDR)
+ }
+
+ if !c.EncryptionStrictModeCIDR.Addr().Is4() {
+ log.Fatalf("%s must be an IPv4 CIDR", EncryptionStrictModeCIDR)
+ }
+
+ c.EncryptionStrictModeAllowRemoteNodeIdentities = vp.GetBool(EncryptionStrictModeAllowRemoteNodeIdentities)
+ c.EnableEncryptionStrictMode = encryptionStrictModeEnabled
+ }
+
ipv4NativeRoutingCIDR := vp.GetString(IPv4NativeRoutingCIDR)
if ipv4NativeRoutingCIDR != "" {
@@ -3282,6 +3390,7 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) {
c.DNSProxyConcurrencyProcessingGracePeriod = vp.GetDuration(DNSProxyConcurrencyProcessingGracePeriod)
c.DNSProxyLockCount = vp.GetInt(DNSProxyLockCount)
c.DNSProxyLockTimeout = vp.GetDuration(DNSProxyLockTimeout)
+ c.FQDNRejectResponse = vp.GetString(FQDNRejectResponseCode)
// Convert IP strings into net.IPNet types
subnets, invalid := ip.ParseCIDRs(vp.GetStringSlice(IPv4PodSubnets))
@@ -3330,6 +3439,7 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) {
}
c.ConntrackGCInterval = vp.GetDuration(ConntrackGCInterval)
+ c.ConntrackGCMaxInterval = vp.GetDuration(ConntrackGCMaxInterval)
if m, err := command.GetStringMapStringE(vp, KVStoreOpt); err != nil {
log.Fatalf("unable to parse %s: %s", KVStoreOpt, err)
@@ -3343,12 +3453,6 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) {
c.LogOpt = m
}
- if m, err := command.GetStringMapStringE(vp, APIRateLimitName); err != nil {
- log.Fatalf("unable to parse %s: %s", APIRateLimitName, err)
- } else {
- c.APIRateLimit = m
- }
-
c.bpfMapEventConfigs = make(BPFEventBufferConfigs)
parseBPFMapEventConfigs(c.bpfMapEventConfigs, defaults.BPFEventBufferConfigs)
if m, err := command.GetStringMapStringE(vp, BPFMapEventBuffers); err != nil {
@@ -3405,7 +3509,7 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) {
}
switch c.IPAM {
- case ipamOption.IPAMKubernetes, ipamOption.IPAMClusterPool, ipamOption.IPAMClusterPoolV2:
+ case ipamOption.IPAMKubernetes, ipamOption.IPAMClusterPool:
if c.EnableIPv4 {
c.K8sRequireIPv4PodCIDR = true
}
@@ -3443,12 +3547,47 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) {
c.HubbleExportFileMaxSizeMB = vp.GetInt(HubbleExportFileMaxSizeMB)
c.HubbleExportFileMaxBackups = vp.GetInt(HubbleExportFileMaxBackups)
c.HubbleExportFileCompress = vp.GetBool(HubbleExportFileCompress)
+
+ for _, enc := range vp.GetStringSlice(HubbleExportAllowlist) {
+ dec := json.NewDecoder(strings.NewReader(enc))
+ var result flowpb.FlowFilter
+ if err := dec.Decode(&result); err != nil {
+ if err == io.EOF {
+ break
+ }
+ log.Fatalf("failed to decode hubble-export-allowlist '%v': %s", enc, err)
+ }
+ c.HubbleExportAllowlist = append(c.HubbleExportAllowlist, &result)
+ }
+
+ for _, enc := range vp.GetStringSlice(HubbleExportDenylist) {
+ dec := json.NewDecoder(strings.NewReader(enc))
+ var result flowpb.FlowFilter
+ if err := dec.Decode(&result); err != nil {
+ if err == io.EOF {
+ break
+ }
+ log.Fatalf("failed to decode hubble-export-denylist '%v': %s", enc, err)
+ }
+ c.HubbleExportDenylist = append(c.HubbleExportDenylist, &result)
+ }
+
+ if fm := vp.GetStringSlice(HubbleExportFieldmask); len(fm) > 0 {
+ _, err := fieldmaskpb.New(&flowpb.Flow{}, fm...)
+ if err != nil {
+ log.Fatalf("hubble-export-fieldmask contains invalid fieldmask '%v': %s", fm, err)
+ }
+ c.HubbleExportFieldmask = vp.GetStringSlice(HubbleExportFieldmask)
+ }
+
c.EnableHubbleRecorderAPI = vp.GetBool(EnableHubbleRecorderAPI)
c.HubbleRecorderStoragePath = vp.GetString(HubbleRecorderStoragePath)
c.HubbleRecorderSinkQueueSize = vp.GetInt(HubbleRecorderSinkQueueSize)
c.HubbleSkipUnknownCGroupIDs = vp.GetBool(HubbleSkipUnknownCGroupIDs)
c.HubbleMonitorEvents = vp.GetStringSlice(HubbleMonitorEvents)
- c.HubbleRedact = vp.GetStringSlice(HubbleRedact)
+ c.HubbleRedactEnabled = vp.GetBool(HubbleRedactEnabled)
+ c.HubbleRedactHttpURLQuery = vp.GetBool(HubbleRedactHttpURLQuery)
+ c.HubbleRedactKafkaApiKey = vp.GetBool(HubbleRedactKafkaApiKey)
c.DisableIptablesFeederRules = vp.GetStringSlice(DisableIptablesFeederRules)
@@ -3493,6 +3632,7 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) {
// To support K8s NetworkPolicy
c.EnableK8sNetworkPolicy = vp.GetBool(EnableK8sNetworkPolicy)
+ c.PolicyCIDRMatchMode = vp.GetStringSlice(PolicyCIDRMatchMode)
}
func (c *DaemonConfig) populateDevices(vp *viper.Viper) {
@@ -3663,29 +3803,52 @@ func (c *DaemonConfig) checkMapSizeLimits() error {
}
func (c *DaemonConfig) checkIPv4NativeRoutingCIDR() error {
- if c.GetIPv4NativeRoutingCIDR() == nil && c.EnableIPv4Masquerade && !c.TunnelingEnabled() &&
- c.IPAMMode() != ipamOption.IPAMENI && c.EnableIPv4 && c.IPAMMode() != ipamOption.IPAMAlibabaCloud {
- return fmt.Errorf(
- "native routing cidr must be configured with option --%s "+
- "in combination with --%s --%s=%s --%s=%s --%s=true",
- IPv4NativeRoutingCIDR, EnableIPv4Masquerade, RoutingMode, RoutingModeNative,
- IPAM, c.IPAMMode(), EnableIPv4Name)
+ if c.GetIPv4NativeRoutingCIDR() != nil {
+ return nil
+ }
+ if !c.EnableIPv4 || !c.EnableIPv4Masquerade {
+ return nil
+ }
+ if c.EnableIPMasqAgent {
+ return nil
+ }
+ if c.TunnelingEnabled() {
+ return nil
+ }
+ if c.IPAMMode() == ipamOption.IPAMENI || c.IPAMMode() == ipamOption.IPAMAlibabaCloud {
+ return nil
}
- return nil
+ return fmt.Errorf(
+ "native routing cidr must be configured with option --%s "+
+ "in combination with --%s=true --%s=true --%s=false --%s=%s --%s=%s",
+ IPv4NativeRoutingCIDR,
+ EnableIPv4Name, EnableIPv4Masquerade,
+ EnableIPMasqAgent,
+ RoutingMode, RoutingModeNative,
+ IPAM, c.IPAMMode())
}
func (c *DaemonConfig) checkIPv6NativeRoutingCIDR() error {
- if c.GetIPv6NativeRoutingCIDR() == nil && c.EnableIPv6Masquerade && !c.TunnelingEnabled() &&
- c.EnableIPv6 {
- return fmt.Errorf(
- "native routing cidr must be configured with option --%s "+
- "in combination with --%s --%s=%s --%s=true",
- IPv6NativeRoutingCIDR, EnableIPv6Masquerade, RoutingMode, RoutingModeNative,
- EnableIPv6Name)
+ if c.GetIPv6NativeRoutingCIDR() != nil {
+ return nil
}
-
- return nil
+ if !c.EnableIPv6 || !c.EnableIPv6Masquerade {
+ return nil
+ }
+ if c.EnableIPMasqAgent {
+ return nil
+ }
+ if c.TunnelingEnabled() {
+ return nil
+ }
+ return fmt.Errorf(
+ "native routing cidr must be configured with option --%s "+
+ "in combination with --%s=true --%s=true --%s=false --%s=%s",
+ IPv6NativeRoutingCIDR,
+ EnableIPv6Name, EnableIPv6Masquerade,
+ EnableIPMasqAgent,
+ RoutingMode, RoutingModeNative)
}
func (c *DaemonConfig) checkIPAMDelegatedPlugin() error {
@@ -3702,6 +3865,15 @@ func (c *DaemonConfig) checkIPAMDelegatedPlugin() error {
if c.EnableEndpointHealthChecking {
return fmt.Errorf("--%s must be disabled with --%s=%s", EnableEndpointHealthChecking, IPAM, ipamOption.IPAMDelegatedPlugin)
}
+ // Ingress controller and envoy config require cilium-agent to create an IP address
+ // specifically for differentiating ingress and envoy traffic, which is not possible
+ // with delegated IPAM.
+ if c.EnableIngressController {
+ return fmt.Errorf("--%s must be disabled with --%s=%s", EnableIngressController, IPAM, ipamOption.IPAMDelegatedPlugin)
+ }
+ if c.EnableEnvoyConfig {
+ return fmt.Errorf("--%s must be disabled with --%s=%s", EnableEnvoyConfig, IPAM, ipamOption.IPAMDelegatedPlugin)
+ }
}
return nil
}
@@ -3716,6 +3888,7 @@ func (c *DaemonConfig) calculateBPFMapSizes(vp *viper.Viper) error {
c.NATMapEntriesGlobal = vp.GetInt(NATMapEntriesGlobalName)
c.NeighMapEntriesGlobal = vp.GetInt(NeighMapEntriesGlobalName)
c.PolicyMapEntries = vp.GetInt(PolicyMapEntriesName)
+ c.PolicyMapFullReconciliationInterval = vp.GetDuration(PolicyMapFullReconciliationIntervalName)
c.SockRevNatEntries = vp.GetInt(SockRevNatEntriesName)
c.LBMapEntries = vp.GetInt(LBMapEntriesName)
c.LBServiceMapEntries = vp.GetInt(LBServiceMapMaxEntries)
@@ -3943,6 +4116,10 @@ func (c *DaemonConfig) BGPControlPlaneEnabled() bool {
return c.EnableBGPControlPlane
}
+func (c *DaemonConfig) IsDualStack() bool {
+ return c.EnableIPv4 && c.EnableIPv6
+}
+
// StoreViperInFile stores viper's configuration in a the given directory under
// the file name 'viper-config.yaml'. If this file already exists, it is renamed
// to 'viper-config-1.yaml', if 'viper-config-1.yaml' also exists,
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/cidr.go b/vendor/github.com/cilium/cilium/pkg/policy/api/cidr.go
new file mode 100644
index 000000000..34680b5fa
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/api/cidr.go
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package api
+
+import (
+ "net"
+ "net/netip"
+ "strings"
+
+ "github.com/cilium/cilium/pkg/ip"
+ "github.com/cilium/cilium/pkg/labels"
+ cidrpkg "github.com/cilium/cilium/pkg/labels/cidr"
+ "github.com/cilium/cilium/pkg/option"
+)
+
+// +kubebuilder:validation:Pattern=`^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\/([0-9]|[1-2][0-9]|3[0-2])$|^s*((([0-9A-Fa-f]{1,4}:){7}(:|([0-9A-Fa-f]{1,4})))|(([0-9A-Fa-f]{1,4}:){6}:([0-9A-Fa-f]{1,4})?)|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){0,1}):([0-9A-Fa-f]{1,4})?))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){0,2}):([0-9A-Fa-f]{1,4})?))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){0,3}):([0-9A-Fa-f]{1,4})?))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){0,4}):([0-9A-Fa-f]{1,4})?))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){0,5}):([0-9A-Fa-f]{1,4})?))|(:(:|((:[0-9A-Fa-f]{1,4}){1,7}))))(%.+)?s*/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$`
+
+// CIDR specifies a block of IP addresses.
+// Example: 192.0.2.1/32
+type CIDR string
+
+var (
+ ipv4All = CIDR("0.0.0.0/0")
+ ipv6All = CIDR("::/0")
+)
+
+// CIDRRule is a rule that specifies a CIDR prefix to/from which outside
+// communication is allowed, along with an optional list of subnets within that
+// CIDR prefix to/from which outside communication is not allowed.
+type CIDRRule struct {
+ // CIDR is a CIDR prefix / IP Block.
+ //
+ // +kubebuilder:validation:OneOf
+ Cidr CIDR `json:"cidr,omitempty"`
+
+ // CIDRGroupRef is a reference to a CiliumCIDRGroup object.
+ // A CiliumCIDRGroup contains a list of CIDRs that the endpoint, subject to
+ // the rule, can (Ingress) or cannot (IngressDeny) receive connections from.
+ //
+ // +kubebuilder:validation:OneOf
+ CIDRGroupRef CIDRGroupRef `json:"cidrGroupRef,omitempty"`
+
+ // ExceptCIDRs is a list of IP blocks which the endpoint subject to the rule
+ // is not allowed to initiate connections to. These CIDR prefixes should be
+ // contained within Cidr, using ExceptCIDRs together with CIDRGroupRef is not
+ // supported yet.
+ // These exceptions are only applied to the Cidr in this CIDRRule, and do not
+ // apply to any other CIDR prefixes in any other CIDRRules.
+ //
+ // +kubebuilder:validation:Optional
+ ExceptCIDRs []CIDR `json:"except,omitempty"`
+
+ // Generated indicates whether the rule was generated based on other rules
+ // or provided by user
+ Generated bool `json:"-"`
+}
+
+// String converts the CIDRRule into a human-readable string.
+func (r CIDRRule) String() string {
+ exceptCIDRs := ""
+ if len(r.ExceptCIDRs) > 0 {
+ exceptCIDRs = "-" + CIDRSlice(r.ExceptCIDRs).String()
+ }
+ return string(r.Cidr) + exceptCIDRs
+}
+
+// CIDRSlice is a slice of CIDRs. It allows receiver methods to be defined for
+// transforming the slice into other convenient forms such as
+// EndpointSelectorSlice.
+type CIDRSlice []CIDR
+
+// GetAsEndpointSelectors returns the provided CIDR slice as a slice of
+// endpoint selectors
+func (s CIDRSlice) GetAsEndpointSelectors() EndpointSelectorSlice {
+ // If multiple CIDRs representing reserved:world are in this CIDRSlice,
+ // we only have to add the EndpointSelector representing reserved:world
+ // once.
+ var hasIPv4AllBeenAdded, hasIPv6AllBeenAdded bool
+ slice := EndpointSelectorSlice{}
+ for _, cidr := range s {
+ if cidr == ipv4All {
+ hasIPv4AllBeenAdded = true
+ }
+ if cidr == ipv6All {
+ hasIPv6AllBeenAdded = true
+ }
+ lbl, err := cidrpkg.IPStringToLabel(string(cidr))
+ if err == nil {
+ slice = append(slice, NewESFromLabels(lbl))
+ }
+ // TODO: Log the error?
+ }
+
+ if option.Config.IsDualStack() {
+ // If Cilium is in dual-stack mode then world-ipv4 and
+ // world-ipv6 need to be distinguished from one another.
+ if hasIPv4AllBeenAdded && hasIPv6AllBeenAdded {
+ slice = append(slice, ReservedEndpointSelectors[labels.IDNameWorld])
+ }
+ if hasIPv4AllBeenAdded {
+ slice = append(slice, ReservedEndpointSelectors[labels.IDNameWorldIPv4])
+ }
+ if hasIPv6AllBeenAdded {
+ slice = append(slice, ReservedEndpointSelectors[labels.IDNameWorldIPv6])
+ }
+ } else if option.Config.EnableIPv4 && hasIPv4AllBeenAdded {
+ slice = append(slice, ReservedEndpointSelectors[labels.IDNameWorld])
+ } else if option.Config.EnableIPv6 && hasIPv6AllBeenAdded {
+ slice = append(slice, ReservedEndpointSelectors[labels.IDNameWorld])
+ }
+ return slice
+}
+
+// StringSlice returns the CIDR slice as a slice of strings.
+func (s CIDRSlice) StringSlice() []string {
+ result := make([]string, 0, len(s))
+ for _, c := range s {
+ result = append(result, string(c))
+ }
+ return result
+}
+
+// String converts the CIDRSlice into a human-readable string.
+func (s CIDRSlice) String() string {
+ if len(s) == 0 {
+ return ""
+ }
+ return "[" + strings.Join(s.StringSlice(), ",") + "]"
+}
+
+// CIDRRuleSlice is a slice of CIDRRules. It allows receiver methods to be
+// defined for transforming the slice into other convenient forms such as
+// EndpointSelectorSlice.
+type CIDRRuleSlice []CIDRRule
+
+// GetAsEndpointSelectors returns the provided CIDRRule slice as a slice of
+// endpoint selectors
+func (s CIDRRuleSlice) GetAsEndpointSelectors() EndpointSelectorSlice {
+ cidrs := ComputeResultantCIDRSet(s)
+ return cidrs.GetAsEndpointSelectors()
+}
+
+// StringSlice returns the CIDRRuleSlice as a slice of strings.
+func (s CIDRRuleSlice) StringSlice() []string {
+ result := make([]string, 0, len(s))
+ for _, c := range s {
+ result = append(result, c.String())
+ }
+ return result
+}
+
+// ComputeResultantCIDRSet converts a slice of CIDRRules into a slice of
+// individual CIDRs. This expands the cidr defined by each CIDRRule, applies
+// the CIDR exceptions defined in "ExceptCIDRs", and forms a minimal set of
+// CIDRs that cover all of the CIDRRules.
+//
+// Assumes no error checking is necessary as CIDRRule.Sanitize already does this.
+func ComputeResultantCIDRSet(cidrs CIDRRuleSlice) CIDRSlice {
+ var allResultantAllowedCIDRs CIDRSlice
+ for _, s := range cidrs {
+ _, allowNet, _ := net.ParseCIDR(string(s.Cidr))
+
+ var removeSubnets []*net.IPNet
+ for _, t := range s.ExceptCIDRs {
+ _, removeSubnet, _ := net.ParseCIDR(string(t))
+ removeSubnets = append(removeSubnets, removeSubnet)
+ }
+ resultantAllowedCIDRs := ip.RemoveCIDRs([]*net.IPNet{allowNet}, removeSubnets)
+
+ for _, u := range resultantAllowedCIDRs {
+ allResultantAllowedCIDRs = append(allResultantAllowedCIDRs, CIDR(u.String()))
+ }
+ }
+ return allResultantAllowedCIDRs
+}
+
+// addrsToCIDRRules generates CIDRRules for the IPs passed in.
+// This function will mark the rule to Generated true by default.
+func addrsToCIDRRules(addrs []netip.Addr) []CIDRRule {
+ cidrRules := make([]CIDRRule, 0, len(addrs))
+ for _, addr := range addrs {
+ rule := CIDRRule{ExceptCIDRs: make([]CIDR, 0)}
+ rule.Generated = true
+ if addr.Is4() {
+ rule.Cidr = CIDR(addr.String() + "/32")
+ } else {
+ rule.Cidr = CIDR(addr.String() + "/128")
+ }
+ cidrRules = append(cidrRules, rule)
+ }
+ return cidrRules
+}
+
+// +kubebuilder:validation:MaxLength=253
+// +kubebuilder:validation:Pattern=`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`
+//
+// CIDRGroupRef is a reference to a CIDR Group.
+// A CIDR Group is a list of CIDRs whose IP addresses should be considered as a
+// same entity when applying fromCIDRGroupRefs policies on incoming network traffic.
+type CIDRGroupRef string
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/decision.go b/vendor/github.com/cilium/cilium/pkg/policy/api/decision.go
new file mode 100644
index 000000000..3a05b3ada
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/api/decision.go
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package api
+
+import (
+ "fmt"
+)
+
+// Decision is a reachability policy decision
+type Decision byte
+
+const (
+ // Undecided means that we have not come to a decision yet
+ Undecided Decision = iota
+ // Allowed means that reachability is allowed
+ Allowed
+ // Denied means that reachability is denied
+ Denied
+)
+
+var (
+ decisionToString = map[Decision]string{
+ Undecided: "undecided",
+ Allowed: "allowed",
+ Denied: "denied",
+ }
+ stringToDecision = map[string]Decision{
+ "undecided": Undecided,
+ "allowed": Allowed,
+ "denied": Denied,
+ }
+)
+
+// String returns the decision in human readable format
+func (d Decision) String() string {
+ if v, exists := decisionToString[d]; exists {
+ return v
+ }
+ return ""
+}
+
+// UnmarshalJSON parses a JSON formatted buffer and returns a decision
+func (d *Decision) UnmarshalJSON(b []byte) error {
+ if d == nil {
+ d = new(Decision)
+ }
+ if len(b) <= len(`""`) {
+ return fmt.Errorf("invalid decision '%s'", string(b))
+ }
+ if v, exists := stringToDecision[string(b[1:len(b)-1])]; exists {
+ *d = v
+ return nil
+ }
+
+ return fmt.Errorf("unknown '%s' decision", string(b))
+}
+
+// MarshalJSON returns the decision as JSON formatted buffer
+func (d Decision) MarshalJSON() ([]byte, error) {
+ s := d.String()
+ // length of decision string plus two `"`
+ b := make([]byte, len(s)+2)
+ b[0] = '"'
+ copy(b[1:], s)
+ b[len(b)-1] = '"'
+ return b, nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/doc.go b/vendor/github.com/cilium/cilium/pkg/policy/api/doc.go
new file mode 100644
index 000000000..ef8723992
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/api/doc.go
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// +k8s:deepcopy-gen=package
+// +k8s:openapi-gen=true
+// +deepequal-gen=package
+
+// Package api defines the API of the Cilium network policy interface
+// +groupName=policy
+package api
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/egress.go b/vendor/github.com/cilium/cilium/pkg/policy/api/egress.go
new file mode 100644
index 000000000..f4eac3d72
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/api/egress.go
@@ -0,0 +1,370 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package api
+
+import (
+ "context"
+
+ slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
+)
+
+// EgressCommonRule is a rule that shares some of its fields across the
+// EgressRule and EgressDenyRule. It's publicly exported so the code generators
+// can generate code for this structure.
+type EgressCommonRule struct {
+ // ToEndpoints is a list of endpoints identified by an EndpointSelector to
+ // which the endpoints subject to the rule are allowed to communicate.
+ //
+ // Example:
+ // Any endpoint with the label "role=frontend" can communicate with any
+ // endpoint carrying the label "role=backend".
+ //
+ // +kubebuilder:validation:Optional
+ ToEndpoints []EndpointSelector `json:"toEndpoints,omitempty"`
+
+ // ToRequires is a list of additional constraints which must be met
+ // in order for the selected endpoints to be able to connect to other
+ // endpoints. These additional constraints do no by itself grant access
+ // privileges and must always be accompanied with at least one matching
+ // ToEndpoints.
+ //
+ // Example:
+ // Any Endpoint with the label "team=A" requires any endpoint to which it
+ // communicates to also carry the label "team=A".
+ //
+ // +kubebuilder:validation:Optional
+ ToRequires []EndpointSelector `json:"toRequires,omitempty"`
+
+ // ToCIDR is a list of IP blocks which the endpoint subject to the rule
+ // is allowed to initiate connections. Only connections destined for
+ // outside of the cluster and not targeting the host will be subject
+ // to CIDR rules. This will match on the destination IP address of
+ // outgoing connections. Adding a prefix into ToCIDR or into ToCIDRSet
+ // with no ExcludeCIDRs is equivalent. Overlaps are allowed between
+ // ToCIDR and ToCIDRSet.
+ //
+ // Example:
+ // Any endpoint with the label "app=database-proxy" is allowed to
+ // initiate connections to 10.2.3.0/24
+ //
+ // +kubebuilder:validation:Optional
+ ToCIDR CIDRSlice `json:"toCIDR,omitempty"`
+
+ // ToCIDRSet is a list of IP blocks which the endpoint subject to the rule
+ // is allowed to initiate connections to in addition to connections
+ // which are allowed via ToEndpoints, along with a list of subnets contained
+ // within their corresponding IP block to which traffic should not be
+ // allowed. This will match on the destination IP address of outgoing
+ // connections. Adding a prefix into ToCIDR or into ToCIDRSet with no
+ // ExcludeCIDRs is equivalent. Overlaps are allowed between ToCIDR and
+ // ToCIDRSet.
+ //
+ // Example:
+ // Any endpoint with the label "app=database-proxy" is allowed to
+ // initiate connections to 10.2.3.0/24 except from IPs in subnet 10.2.3.0/28.
+ //
+ // +kubebuilder:validation:Optional
+ ToCIDRSet CIDRRuleSlice `json:"toCIDRSet,omitempty"`
+
+ // ToEntities is a list of special entities to which the endpoint subject
+ // to the rule is allowed to initiate connections. Supported entities are
+ // `world`, `cluster`,`host`,`remote-node`,`kube-apiserver`, `init`,
+ // `health`,`unmanaged` and `all`.
+ //
+ // +kubebuilder:validation:Optional
+ ToEntities EntitySlice `json:"toEntities,omitempty"`
+
+ // ToServices is a list of services to which the endpoint subject
+ // to the rule is allowed to initiate connections.
+ // Currently Cilium only supports toServices for K8s services without
+ // selectors.
+ //
+ // Example:
+ // Any endpoint with the label "app=backend-app" is allowed to
+ // initiate connections to all cidrs backing the "external-service" service
+ //
+ // +kubebuilder:validation:Optional
+ ToServices []Service `json:"toServices,omitempty"`
+
+ // ToGroups is a directive that allows the integration with multiple outside
+ // providers. Currently, only AWS is supported, and the rule can select by
+ // multiple sub directives:
+ //
+ // Example:
+ // toGroups:
+ // - aws:
+ // securityGroupsIds:
+ // - 'sg-XXXXXXXXXXXXX'
+ //
+ // +kubebuilder:validation:Optional
+ ToGroups []ToGroups `json:"toGroups,omitempty"`
+
+ // TODO: Move this to the policy package
+ // (https://github.com/cilium/cilium/issues/8353)
+ aggregatedSelectors EndpointSelectorSlice `json:"-"`
+}
+
+// EgressRule contains all rule types which can be applied at egress, i.e.
+// network traffic that originates inside the endpoint and exits the endpoint
+// selected by the endpointSelector.
+//
+// - All members of this structure are optional. If omitted or empty, the
+// member will have no effect on the rule.
+//
+// - If multiple members of the structure are specified, then all members
+// must match in order for the rule to take effect. The exception to this
+// rule is the ToRequires member; the effects of any Requires field in any
+// rule will apply to all other rules as well.
+//
+// - ToEndpoints, ToCIDR, ToCIDRSet, ToEntities, ToServices and ToGroups are
+// mutually exclusive. Only one of these members may be present within an
+// individual rule.
+type EgressRule struct {
+ EgressCommonRule `json:",inline"`
+
+ // ToPorts is a list of destination ports identified by port number and
+ // protocol which the endpoint subject to the rule is allowed to
+ // connect to.
+ //
+ // Example:
+ // Any endpoint with the label "role=frontend" is allowed to initiate
+ // connections to destination port 8080/tcp
+ //
+ // +kubebuilder:validation:Optional
+ ToPorts PortRules `json:"toPorts,omitempty"`
+
+ // ToFQDN allows whitelisting DNS names in place of IPs. The IPs that result
+ // from DNS resolution of `ToFQDN.MatchName`s are added to the same
+ // EgressRule object as ToCIDRSet entries, and behave accordingly. Any L4 and
+ // L7 rules within this EgressRule will also apply to these IPs.
+ // The DNS -> IP mapping is re-resolved periodically from within the
+ // cilium-agent, and the IPs in the DNS response are effected in the policy
+ // for selected pods as-is (i.e. the list of IPs is not modified in any way).
+ // Note: An explicit rule to allow for DNS traffic is needed for the pods, as
+ // ToFQDN counts as an egress rule and will enforce egress policy when
+ // PolicyEnforcment=default.
+ // Note: If the resolved IPs are IPs within the kubernetes cluster, the
+ // ToFQDN rule will not apply to that IP.
+ // Note: ToFQDN cannot occur in the same policy as other To* rules.
+ //
+ // +kubebuilder:validation:Optional
+ ToFQDNs FQDNSelectorSlice `json:"toFQDNs,omitempty"`
+
+ // ICMPs is a list of ICMP rule identified by type number
+ // which the endpoint subject to the rule is allowed to connect to.
+ //
+ // Example:
+ // Any endpoint with the label "app=httpd" is allowed to initiate
+ // type 8 ICMP connections.
+ //
+ // +kubebuilder:validation:Optional
+ ICMPs ICMPRules `json:"icmps,omitempty"`
+
+ // Authentication is the required authentication type for the allowed traffic, if any.
+ //
+ // +kubebuilder:validation:Optional
+ Authentication *Authentication `json:"authentication,omitempty"`
+}
+
+// EgressDenyRule contains all rule types which can be applied at egress, i.e.
+// network traffic that originates inside the endpoint and exits the endpoint
+// selected by the endpointSelector.
+//
+// - All members of this structure are optional. If omitted or empty, the
+// member will have no effect on the rule.
+//
+// - If multiple members of the structure are specified, then all members
+// must match in order for the rule to take effect. The exception to this
+// rule is the ToRequires member; the effects of any Requires field in any
+// rule will apply to all other rules as well.
+//
+// - ToEndpoints, ToCIDR, ToCIDRSet, ToEntities, ToServices and ToGroups are
+// mutually exclusive. Only one of these members may be present within an
+// individual rule.
+type EgressDenyRule struct {
+ EgressCommonRule `json:",inline"`
+
+ // ToPorts is a list of destination ports identified by port number and
+ // protocol which the endpoint subject to the rule is not allowed to connect
+ // to.
+ //
+ // Example:
+ // Any endpoint with the label "role=frontend" is not allowed to initiate
+ // connections to destination port 8080/tcp
+ //
+ // +kubebuilder:validation:Optional
+ ToPorts PortDenyRules `json:"toPorts,omitempty"`
+
+ // ICMPs is a list of ICMP rule identified by type number
+ // which the endpoint subject to the rule is not allowed to connect to.
+ //
+ // Example:
+ // Any endpoint with the label "app=httpd" is not allowed to initiate
+ // type 8 ICMP connections.
+ //
+ // +kubebuilder:validation:Optional
+ ICMPs ICMPRules `json:"icmps,omitempty"`
+}
+
+// SetAggregatedSelectors creates a single slice containing all of the following
+// fields within the EgressCommonRule, converted to EndpointSelector, to be
+// stored by the caller of the EgressCommonRule for easy lookup while performing
+// policy evaluation for the rule:
+// * ToEntities
+// * ToCIDR
+// * ToCIDRSet
+// * ToFQDNs
+//
+// ToEndpoints is not aggregated due to requirement folding in
+// GetDestinationEndpointSelectorsWithRequirements()
+func (e *EgressCommonRule) getAggregatedSelectors() EndpointSelectorSlice {
+ res := make(EndpointSelectorSlice, 0, len(e.ToEntities)+len(e.ToCIDR)+len(e.ToCIDRSet))
+ res = append(res, e.ToEntities.GetAsEndpointSelectors()...)
+ res = append(res, e.ToCIDR.GetAsEndpointSelectors()...)
+ res = append(res, e.ToCIDRSet.GetAsEndpointSelectors()...)
+ return res
+}
+
+// SetAggregatedSelectors creates a single slice containing all of the following
+// fields within the EgressRule, converted to EndpointSelector, to be stored
+// within the EgressRule for easy lookup while performing policy evaluation
+// for the rule:
+// * ToEntities
+// * ToCIDR
+// * ToCIDRSet
+// * ToFQDNs
+//
+// ToEndpoints is not aggregated due to requirement folding in
+// GetDestinationEndpointSelectorsWithRequirements()
+func (e *EgressRule) SetAggregatedSelectors() {
+ ess := e.getAggregatedSelectors()
+ ess = append(ess, e.ToFQDNs.GetAsEndpointSelectors()...)
+ e.aggregatedSelectors = ess
+}
+
+// SetAggregatedSelectors creates a single slice containing all of the following
+// fields within the EgressRule, converted to EndpointSelector, to be stored
+// within the EgressRule for easy lookup while performing policy evaluation
+// for the rule:
+// * ToEntities
+// * ToCIDR
+// * ToCIDRSet
+// * ToFQDNs
+//
+// ToEndpoints is not aggregated due to requirement folding in
+// GetDestinationEndpointSelectorsWithRequirements()
+func (e *EgressCommonRule) SetAggregatedSelectors() {
+ e.aggregatedSelectors = e.getAggregatedSelectors()
+}
+
+// GetDestinationEndpointSelectorsWithRequirements returns a slice of endpoints selectors covering
+// all L3 dst selectors of the egress rule
+func (e *EgressRule) GetDestinationEndpointSelectorsWithRequirements(requirements []slim_metav1.LabelSelectorRequirement) EndpointSelectorSlice {
+ if e.aggregatedSelectors == nil {
+ e.SetAggregatedSelectors()
+ }
+ return e.EgressCommonRule.getDestinationEndpointSelectorsWithRequirements(requirements)
+}
+
+// GetDestinationEndpointSelectorsWithRequirements returns a slice of endpoints selectors covering
+// all L3 source selectors of the ingress rule
+func (e *EgressDenyRule) GetDestinationEndpointSelectorsWithRequirements(requirements []slim_metav1.LabelSelectorRequirement) EndpointSelectorSlice {
+ if e.aggregatedSelectors == nil {
+ e.SetAggregatedSelectors()
+ }
+ return e.EgressCommonRule.getDestinationEndpointSelectorsWithRequirements(requirements)
+}
+
+// GetDestinationEndpointSelectorsWithRequirements returns a slice of endpoints selectors covering
+// all L3 source selectors of the ingress rule
+func (e *EgressCommonRule) getDestinationEndpointSelectorsWithRequirements(
+ requirements []slim_metav1.LabelSelectorRequirement,
+) EndpointSelectorSlice {
+
+ res := make(EndpointSelectorSlice, 0, len(e.ToEndpoints)+len(e.aggregatedSelectors))
+
+ if len(requirements) > 0 && len(e.ToEndpoints) > 0 {
+ for idx := range e.ToEndpoints {
+ sel := *e.ToEndpoints[idx].DeepCopy()
+ sel.MatchExpressions = append(sel.MatchExpressions, requirements...)
+ sel.SyncRequirementsWithLabelSelector()
+ // Even though this string is deep copied, we need to override it
+ // because we are updating the contents of the MatchExpressions.
+ sel.cachedLabelSelectorString = sel.LabelSelector.String()
+ res = append(res, sel)
+ }
+ } else {
+ res = append(res, e.ToEndpoints...)
+ }
+ return append(res, e.aggregatedSelectors...)
+}
+
+// AllowsWildcarding returns true if wildcarding should be performed upon
+// policy evaluation for the given rule.
+func (e *EgressRule) AllowsWildcarding() bool {
+ return e.EgressCommonRule.AllowsWildcarding() && len(e.ToFQDNs) == 0
+}
+
+// AllowsWildcarding returns true if wildcarding should be performed upon
+// policy evaluation for the given rule.
+func (e *EgressCommonRule) AllowsWildcarding() bool {
+ return len(e.ToRequires)+len(e.ToServices) == 0
+}
+
+// RequiresDerivative returns true when the EgressCommonRule contains sections
+// that need a derivative policy created in order to be enforced
+// (e.g. ToGroups).
+func (e *EgressCommonRule) RequiresDerivative() bool {
+ return len(e.ToGroups) > 0
+}
+
+// CreateDerivative will return a new rule based on the data gathered by the
+// rules that creates a new derivative policy.
+// In the case of ToGroups will call outside using the groups callback and this
+// function can take a bit of time.
+func (e *EgressRule) CreateDerivative(ctx context.Context) (*EgressRule, error) {
+ newRule := e.DeepCopy()
+ if !e.RequiresDerivative() {
+ return newRule, nil
+ }
+ newRule.ToCIDRSet = make(CIDRRuleSlice, 0, len(e.ToGroups))
+ for _, group := range e.ToGroups {
+ cidrSet, err := group.GetCidrSet(ctx)
+ if err != nil {
+ return &EgressRule{}, err
+ }
+ if len(cidrSet) == 0 {
+ return &EgressRule{}, nil
+ }
+ newRule.ToCIDRSet = append(e.ToCIDRSet, cidrSet...)
+ }
+ newRule.ToGroups = nil
+ e.SetAggregatedSelectors()
+ return newRule, nil
+}
+
+// CreateDerivative will return a new rule based on the data gathered by the
+// rules that creates a new derivative policy.
+// In the case of ToGroups will call outside using the groups callback and this
+// function can take a bit of time.
+func (e *EgressDenyRule) CreateDerivative(ctx context.Context) (*EgressDenyRule, error) {
+ newRule := e.DeepCopy()
+ if !e.RequiresDerivative() {
+ return newRule, nil
+ }
+ newRule.ToCIDRSet = make(CIDRRuleSlice, 0, len(e.ToGroups))
+ for _, group := range e.ToGroups {
+ cidrSet, err := group.GetCidrSet(ctx)
+ if err != nil {
+ return &EgressDenyRule{}, err
+ }
+ if len(cidrSet) == 0 {
+ return &EgressDenyRule{}, nil
+ }
+ newRule.ToCIDRSet = append(e.ToCIDRSet, cidrSet...)
+ }
+ newRule.ToGroups = nil
+ e.SetAggregatedSelectors()
+ return newRule, nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/entity.go b/vendor/github.com/cilium/cilium/pkg/policy/api/entity.go
new file mode 100644
index 000000000..772916e64
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/api/entity.go
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package api
+
+import (
+ k8sapi "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
+ "github.com/cilium/cilium/pkg/labels"
+)
+
+// Entity specifies the class of receiver/sender endpoints that do not have
+// individual identities. Entities are used to describe "outside of cluster",
+// "host", etc.
+//
+// +kubebuilder:validation:Enum=all;world;cluster;host;init;ingress;unmanaged;remote-node;health;none;kube-apiserver
+type Entity string
+
+const (
+ // EntityAll is an entity that represents all traffic
+ EntityAll Entity = "all"
+
+ // EntityWorld is an entity that represents traffic external to
+ // endpoint's cluster
+ EntityWorld Entity = "world"
+
+ // EntityWorldIPv4 is an entity that represents traffic external to
+ // endpoint's cluster, specifically an IPv4 endpoint, to distinguish
+ // it from IPv6 in dual-stack mode.
+ EntityWorldIPv4 Entity = "world-ipv4"
+
+ // EntityWorldIPv6 is an entity that represents traffic external to
+ // endpoint's cluster, specifically an IPv6 endpoint, to distinguish
+ // it from IPv4 in dual-stack mode.
+ EntityWorldIPv6 Entity = "world-ipv6"
+
+ // EntityCluster is an entity that represents traffic within the
+ // endpoint's cluster, to endpoints not managed by cilium
+ EntityCluster Entity = "cluster"
+
+ // EntityHost is an entity that represents traffic within endpoint host
+ EntityHost Entity = "host"
+
+ // EntityInit is an entity that represents an initializing endpoint
+ EntityInit Entity = "init"
+
+ // EntityIngress is an entity that represents envoy proxy
+ EntityIngress Entity = "ingress"
+
+ // EntityUnmanaged is an entity that represents unmanaged endpoints.
+ EntityUnmanaged Entity = "unmanaged"
+
+ // EntityRemoteNode is an entity that represents all remote nodes
+ EntityRemoteNode Entity = "remote-node"
+
+ // EntityHealth is an entity that represents all health endpoints.
+ EntityHealth Entity = "health"
+
+ // EntityNone is an entity that can be selected but never exist
+ EntityNone Entity = "none"
+
+ // EntityKubeAPIServer is an entity that represents the kube-apiserver.
+ EntityKubeAPIServer Entity = "kube-apiserver"
+)
+
+var (
+ endpointSelectorWorld = NewESFromLabels(labels.NewLabel(labels.IDNameWorld, "", labels.LabelSourceReserved))
+
+ endpointSelectorWorldIPv4 = NewESFromLabels(labels.NewLabel(labels.IDNameWorldIPv4, "", labels.LabelSourceReserved))
+
+ endpointSelectorWorldIPv6 = NewESFromLabels(labels.NewLabel(labels.IDNameWorldIPv6, "", labels.LabelSourceReserved))
+
+ endpointSelectorHost = NewESFromLabels(labels.NewLabel(labels.IDNameHost, "", labels.LabelSourceReserved))
+
+ endpointSelectorInit = NewESFromLabels(labels.NewLabel(labels.IDNameInit, "", labels.LabelSourceReserved))
+
+ endpointSelectorIngress = NewESFromLabels(labels.NewLabel(labels.IDNameIngress, "", labels.LabelSourceReserved))
+
+ endpointSelectorRemoteNode = NewESFromLabels(labels.NewLabel(labels.IDNameRemoteNode, "", labels.LabelSourceReserved))
+
+ endpointSelectorHealth = NewESFromLabels(labels.NewLabel(labels.IDNameHealth, "", labels.LabelSourceReserved))
+
+ EndpointSelectorNone = NewESFromLabels(labels.NewLabel(labels.IDNameNone, "", labels.LabelSourceReserved))
+
+ endpointSelectorUnmanaged = NewESFromLabels(labels.NewLabel(labels.IDNameUnmanaged, "", labels.LabelSourceReserved))
+
+ endpointSelectorKubeAPIServer = NewESFromLabels(labels.LabelKubeAPIServer[labels.IDNameKubeAPIServer])
+
+ // EntitySelectorMapping maps special entity names that come in
+ // policies to selectors
+ EntitySelectorMapping = map[Entity]EndpointSelectorSlice{
+ EntityAll: {WildcardEndpointSelector},
+ EntityWorld: {endpointSelectorWorld},
+ EntityWorldIPv4: {endpointSelectorWorldIPv4},
+ EntityWorldIPv6: {endpointSelectorWorldIPv6},
+ EntityHost: {endpointSelectorHost},
+ EntityInit: {endpointSelectorInit},
+ EntityIngress: {endpointSelectorIngress},
+ EntityRemoteNode: {endpointSelectorRemoteNode},
+ EntityHealth: {endpointSelectorHealth},
+ EntityUnmanaged: {endpointSelectorUnmanaged},
+ EntityNone: {EndpointSelectorNone},
+ EntityKubeAPIServer: {endpointSelectorKubeAPIServer},
+
+ // EntityCluster is populated with an empty entry to allow the
+ // cilium client importing this package to perform basic rule
+ // validation. The basic rule validation only enforces
+ // awareness of individual entity names and does not require
+ // understanding of the individual endpoint selectors. The
+ // endpoint selector for the cluster entity can only be
+ // initialized at runtime as it depends on user configuration
+ // such as the cluster name. See InitEntities() below.
+ EntityCluster: {},
+ }
+)
+
+// EntitySlice is a slice of entities
+type EntitySlice []Entity
+
+// GetAsEndpointSelectors returns the provided entity slice as a slice of
+// endpoint selectors
+func (s EntitySlice) GetAsEndpointSelectors() EndpointSelectorSlice {
+ slice := EndpointSelectorSlice{}
+ for _, e := range s {
+ if selector, ok := EntitySelectorMapping[e]; ok {
+ slice = append(slice, selector...)
+ }
+ }
+
+ return slice
+}
+
+// InitEntities is called to initialize the policy API layer
+func InitEntities(clusterName string, treatRemoteNodeAsHost bool) {
+ EntitySelectorMapping[EntityCluster] = EndpointSelectorSlice{
+ endpointSelectorHost,
+ endpointSelectorRemoteNode,
+ endpointSelectorInit,
+ endpointSelectorIngress,
+ endpointSelectorHealth,
+ endpointSelectorUnmanaged,
+ endpointSelectorKubeAPIServer,
+ NewESFromLabels(labels.NewLabel(k8sapi.PolicyLabelCluster, clusterName, labels.LabelSourceK8s)),
+ }
+
+ hostSelectors := make(EndpointSelectorSlice, 0, 2)
+ hostSelectors = append(hostSelectors, endpointSelectorHost)
+ if treatRemoteNodeAsHost {
+ hostSelectors = append(hostSelectors, endpointSelectorRemoteNode)
+ }
+ EntitySelectorMapping[EntityHost] = hostSelectors
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/fqdn.go b/vendor/github.com/cilium/cilium/pkg/policy/api/fqdn.go
new file mode 100644
index 000000000..1d015975f
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/api/fqdn.go
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package api
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+
+ "github.com/cilium/cilium/pkg/fqdn/dns"
+ "github.com/cilium/cilium/pkg/fqdn/matchpattern"
+)
+
+var (
+ // allowedMatchNameChars tests that MatchName contains only valid DNS characters
+ allowedMatchNameChars = regexp.MustCompile("^[-a-zA-Z0-9_.]+$")
+
+ // allowedPatternChars tests that the MatchPattern field contains only the
+ // characters we want in our wildcard scheme.
+ allowedPatternChars = regexp.MustCompile("^[-a-zA-Z0-9_.*]+$") // the * inside the [] is a literal *
+
+ // FQDNMatchNameRegexString is a regex string which matches what's expected
+ // in the MatchName field in the FQDNSelector. This should be kept in-sync
+ // with the marker comment for validation. There's no way to use a Golang
+ // variable in the marker comment, so it's left up to the developer.
+ FQDNMatchNameRegexString = `^([-a-zA-Z0-9_]+[.]?)+$`
+
+ // FQDNMatchPatternRegexString is a regex string which matches what's expected
+ // in the MatchPattern field in the FQDNSelector. This should be kept in-sync
+ // with the marker comment for validation. There's no way to use a Golang
+ // variable in the marker comment, so it's left up to the developer.
+ FQDNMatchPatternRegexString = `^([-a-zA-Z0-9_*]+[.]?)+$`
+)
+
+type FQDNSelector struct {
+ // MatchName matches literal DNS names. A trailing "." is automatically added
+ // when missing.
+ //
+ // +kubebuilder:validation:Pattern=`^([-a-zA-Z0-9_]+[.]?)+$`
+ MatchName string `json:"matchName,omitempty"`
+
+ // MatchPattern allows using wildcards to match DNS names. All wildcards are
+ // case insensitive. The wildcards are:
+ // - "*" matches 0 or more DNS valid characters, and may occur anywhere in
+ // the pattern. As a special case a "*" as the leftmost character, without a
+ // following "." matches all subdomains as well as the name to the right.
+ // A trailing "." is automatically added when missing.
+ //
+ // Examples:
+ // `*.cilium.io` matches subdomains of cilium at that level
+ // www.cilium.io and blog.cilium.io match, cilium.io and google.com do not
+ // `*cilium.io` matches cilium.io and all subdomains that end with "cilium.io"
+ // except those containing "." separator, subcilium.io and sub-cilium.io match,
+ // www.cilium.io and blog.cilium.io do not
+ // sub*.cilium.io matches subdomains of cilium where the subdomain component
+ // begins with "sub"
+ // sub.cilium.io and subdomain.cilium.io match, www.cilium.io,
+ // blog.cilium.io, cilium.io and google.com do not
+ //
+ // +kubebuilder:validation:Pattern=`^([-a-zA-Z0-9_*]+[.]?)+$`
+ MatchPattern string `json:"matchPattern,omitempty"`
+}
+
+func (s *FQDNSelector) String() string {
+ const m = "MatchName: "
+ const mm = ", MatchPattern: "
+ var str strings.Builder
+ str.Grow(len(m) + len(mm) + len(s.MatchName) + len(s.MatchPattern))
+ str.WriteString(m)
+ str.WriteString(s.MatchName)
+ str.WriteString(mm)
+ str.WriteString(s.MatchPattern)
+ return str.String()
+}
+
+// sanitize for FQDNSelector is a little wonky. While we do more processing
+// when using MatchName the basic requirement is that it is a valid regexp. We
+// test that it can compile here.
+func (s *FQDNSelector) sanitize() error {
+ if len(s.MatchName) > 0 && len(s.MatchPattern) > 0 {
+ return fmt.Errorf("only one of MatchName or MatchPattern is allowed in an FQDNSelector")
+ }
+ if len(s.MatchName) > 0 && !allowedMatchNameChars.MatchString(s.MatchName) {
+ return fmt.Errorf("Invalid characters in MatchName: \"%s\". Only 0-9, a-z, A-Z and . and - characters are allowed", s.MatchName)
+ }
+
+ if len(s.MatchPattern) > 0 && !allowedPatternChars.MatchString(s.MatchPattern) {
+ return fmt.Errorf("Invalid characters in MatchPattern: \"%s\". Only 0-9, a-z, A-Z and ., - and * characters are allowed", s.MatchPattern)
+ }
+ _, err := matchpattern.Validate(s.MatchPattern)
+ return err
+}
+
+// ToRegex converts the given FQDNSelector to its corresponding regular
+// expression. If the MatchName field is set in the selector, it performs all
+// needed formatting to ensure that the field is a valid regular expression.
+func (s *FQDNSelector) ToRegex() (*regexp.Regexp, error) {
+ var preparedMatch string
+ if s.MatchName != "" {
+ preparedMatch = dns.FQDN(s.MatchName)
+ } else {
+ preparedMatch = matchpattern.Sanitize(s.MatchPattern)
+ }
+
+ regex, err := matchpattern.Validate(preparedMatch)
+ return regex, err
+}
+
+// PortRuleDNS is a list of allowed DNS lookups.
+type PortRuleDNS FQDNSelector
+
+// Sanitize checks that the matchName in the portRule can be compiled as a
+// regex. It does not check that a DNS name is a valid DNS name.
+func (r *PortRuleDNS) Sanitize() error {
+ if len(r.MatchName) > 0 && !allowedMatchNameChars.MatchString(r.MatchName) {
+ return fmt.Errorf("Invalid characters in MatchName: \"%s\". Only 0-9, a-z, A-Z and . and - characters are allowed", r.MatchName)
+ }
+
+ if len(r.MatchPattern) > 0 && !allowedPatternChars.MatchString(r.MatchPattern) {
+ return fmt.Errorf("Invalid characters in MatchPattern: \"%s\". Only 0-9, a-z, A-Z and ., - and * characters are allowed", r.MatchPattern)
+ }
+ _, err := matchpattern.Validate(r.MatchPattern)
+ return err
+}
+
+// GetAsEndpointSelectors returns a FQDNSelector as a single EntityNone
+// EndpointSelector slice.
+// Note that toFQDNs behaves differently than most other rules. The presence of
+// any toFQDNs rules means the endpoint must enforce policy, but the IPs are later
+// added as toCIDRSet entries and processed as such.
+func (s *FQDNSelector) GetAsEndpointSelectors() EndpointSelectorSlice {
+ return []EndpointSelector{EndpointSelectorNone}
+}
+
+// FQDNSelectorSlice is a wrapper type for []FQDNSelector to make it simpler to
+// bind methods.
+type FQDNSelectorSlice []FQDNSelector
+
+// GetAsEndpointSelectors will return a single EntityNone if any
+// toFQDNs rules exist, and a nil slice otherwise.
+func (s FQDNSelectorSlice) GetAsEndpointSelectors() EndpointSelectorSlice {
+ for _, rule := range s {
+ return rule.GetAsEndpointSelectors()
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/groups.go b/vendor/github.com/cilium/cilium/pkg/policy/api/groups.go
new file mode 100644
index 000000000..a9ad79e6a
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/api/groups.go
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package api
+
+import (
+ "context"
+ "fmt"
+ "net/netip"
+ "sync"
+
+ "github.com/cilium/cilium/pkg/ip"
+)
+
+const (
+ AWSProvider = "AWS" // AWS provider key
+)
+
+var (
+ providers = sync.Map{} // map with the list of providers to callback to retrieve info from.
+)
+
+// GroupProviderFunc is a func that needs to be registered to be able to
+// register a new provider in the platform.
+type GroupProviderFunc func(context.Context, *ToGroups) ([]netip.Addr, error)
+
+// ToGroups structure to store all kinds of new integrations that needs a new
+// derivative policy.
+type ToGroups struct {
+ AWS *AWSGroup `json:"aws,omitempty"`
+}
+
+// AWSGroup is a structure that can be used to whitelist information from the AWS integration
+type AWSGroup struct {
+ Labels map[string]string `json:"labels,omitempty"`
+ SecurityGroupsIds []string `json:"securityGroupsIds,omitempty"`
+ SecurityGroupsNames []string `json:"securityGroupsNames,omitempty"`
+ Region string `json:"region,omitempty"`
+}
+
+// RegisterToGroupsProvider it will register a new callback that will be used
+// when a new ToGroups rule is added.
+func RegisterToGroupsProvider(providerName string, callback GroupProviderFunc) {
+ providers.Store(providerName, callback)
+}
+
+// GetCidrSet will return the CIDRRule for the rule using the callbacks that
+// are registered in the platform.
+func (group *ToGroups) GetCidrSet(ctx context.Context) ([]CIDRRule, error) {
+ var addrs []netip.Addr
+ // Get per provider CIDRSet
+ if group.AWS != nil {
+ callbackInterface, ok := providers.Load(AWSProvider)
+ if !ok {
+ return nil, fmt.Errorf("Provider %s is not registered", AWSProvider)
+ }
+ callback, ok := callbackInterface.(GroupProviderFunc)
+ if !ok {
+ return nil, fmt.Errorf("Provider callback for %s is not a valid instance", AWSProvider)
+ }
+ awsAddrs, err := callback(ctx, group)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Cannot retrieve data from %s provider: %s",
+ AWSProvider, err)
+ }
+ addrs = append(addrs, awsAddrs...)
+ }
+
+ resultAddrs := ip.KeepUniqueAddrs(addrs)
+ return addrsToCIDRRules(resultAddrs), nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/http.go b/vendor/github.com/cilium/cilium/pkg/policy/api/http.go
new file mode 100644
index 000000000..fa7ee9173
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/api/http.go
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package api
+
+import (
+ "fmt"
+ "regexp"
+)
+
+// MismatchAction specifies what to do when there is no header match
+// Empty string is the default for making the rule to fail the match.
+// Otherwise the rule is still considered as matching, but the mismatches
+// are logged in the access log.
+type MismatchAction string
+
+const (
+ MismatchActionLog MismatchAction = "LOG" // Keep checking other matches
+ MismatchActionAdd MismatchAction = "ADD" // Add the missing value to a possibly multi-valued header
+ MismatchActionDelete MismatchAction = "DELETE" // Remove the whole mismatching header
+ MismatchActionReplace MismatchAction = "REPLACE" // Replace (of add if missing) the header
+)
+
+// HeaderMatch extends the HeaderValue for matching requirement of a
+// named header field against an immediate string, a secret value, or
+// a regex. If none of the optional fields is present, then the
+// header value is not matched, only presence of the header is enough.
+type HeaderMatch struct {
+ // Mismatch identifies what to do in case there is no match. The default is
+ // to drop the request. Otherwise the overall rule is still considered as
+ // matching, but the mismatches are logged in the access log.
+ //
+ // +kubebuilder:validation:Enum=LOG;ADD;DELETE;REPLACE
+ // +kubebuilder:validation:Optional
+ Mismatch MismatchAction `json:"mismatch,omitempty"`
+
+ // Name identifies the header.
+ Name string `json:"name"`
+
+ // Secret refers to a secret that contains the value to be matched against.
+ // The secret must only contain one entry. If the referred secret does not
+ // exist, and there is no "Value" specified, the match will fail.
+ //
+ // +kubebuilder:validation:Optional
+ Secret *Secret `json:"secret,omitempty"`
+
+ // Value matches the exact value of the header. Can be specified either
+ // alone or together with "Secret"; will be used as the header value if the
+ // secret can not be found in the latter case.
+ //
+ // +kubebuilder:validation:Optional
+ Value string `json:"value,omitempty"`
+}
+
+// PortRuleHTTP is a list of HTTP protocol constraints. All fields are
+// optional, if all fields are empty or missing, the rule does not have any
+// effect.
+//
+// All fields of this type are extended POSIX regex as defined by IEEE Std
+// 1003.1, (i.e this follows the egrep/unix syntax, not the perl syntax)
+// matched against the path of an incoming request. Currently it can contain
+// characters disallowed from the conventional "path" part of a URL as defined
+// by RFC 3986.
+type PortRuleHTTP struct {
+ // Path is an extended POSIX regex matched against the path of a
+ // request. Currently it can contain characters disallowed from the
+ // conventional "path" part of a URL as defined by RFC 3986.
+ //
+ // If omitted or empty, all paths are allowed.
+ //
+ // +kubebuilder:validation:Optional
+ Path string `json:"path,omitempty"`
+
+ // Method is an extended POSIX regex matched against the method of a
+ // request, e.g. "GET", "POST", "PUT", "PATCH", "DELETE", ...
+ //
+ // If omitted or empty, all methods are allowed.
+ //
+ // +kubebuilder:validation:Optional
+ Method string `json:"method,omitempty"`
+
+ // Host is an extended POSIX regex matched against the host header of a
+ // request, e.g. "foo.com"
+ //
+ // If omitted or empty, the value of the host header is ignored.
+ //
+ // +kubebuilder:validation:Format=idn-hostname
+ // +kubebuilder:validation:Optional
+ Host string `json:"host,omitempty"`
+
+ // Headers is a list of HTTP headers which must be present in the
+ // request. If omitted or empty, requests are allowed regardless of
+ // headers present.
+ //
+ // +kubebuilder:validation:Optional
+ Headers []string `json:"headers,omitempty"`
+
+ // HeaderMatches is a list of HTTP headers which must be
+ // present and match against the given values. Mismatch field can be used
+ // to specify what to do when there is no match.
+ //
+ // +kubebuilder:validation:Optional
+ HeaderMatches []*HeaderMatch `json:"headerMatches,omitempty"`
+}
+
+// Sanitize sanitizes HTTP rules. It ensures that the path and method fields
+// are valid regular expressions. Note that the proxy may support a wider-range
+// of regular expressions (e.g. that specified by ECMAScript), so this function
+// may return some false positives. If the rule is invalid, returns an error.
+func (h *PortRuleHTTP) Sanitize() error {
+
+ if h.Path != "" {
+ _, err := regexp.Compile(h.Path)
+ if err != nil {
+ return err
+ }
+ }
+
+ if h.Method != "" {
+ _, err := regexp.Compile(h.Method)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Headers are not sanitized.
+
+ // But HeaderMatches are
+ for _, m := range h.HeaderMatches {
+ if m.Name == "" {
+ return fmt.Errorf("Header name missing")
+ }
+ if m.Mismatch != "" &&
+ m.Mismatch != MismatchActionLog && m.Mismatch != MismatchActionAdd &&
+ m.Mismatch != MismatchActionDelete && m.Mismatch != MismatchActionReplace {
+ return fmt.Errorf("Invalid header action: %s", m.Mismatch)
+ }
+ if m.Secret != nil && m.Secret.Name == "" {
+ return fmt.Errorf("Secret name missing")
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/icmp.go b/vendor/github.com/cilium/cilium/pkg/policy/api/icmp.go
new file mode 100644
index 000000000..c2a684729
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/api/icmp.go
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package api
+
+import "strconv"
+
+const (
+ IPv4Family = "IPv4"
+ IPv6Family = "IPv6"
+)
+
+type ICMPRules []ICMPRule
+
+// ICMPRule is a list of ICMP fields.
+type ICMPRule struct {
+ // Fields is a list of ICMP fields.
+ //
+ // +kubebuilder:validation:Optional
+ Fields []ICMPField `json:"fields,omitempty"`
+}
+
+// ICMPField is a ICMP field.
+type ICMPField struct {
+ // Family is a IP address version.
+ // Currently, we support `IPv4` and `IPv6`.
+ // `IPv4` is set as default.
+ //
+ // +kubebuilder:default=IPv4
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:validation:Enum=IPv4;IPv6
+ Family string `json:"family,omitempty"`
+
+ // Type is a ICMP-type.
+ // It should be 0-255 (8bit).
+ //
+ // +kubebuilder:validation:Maximum=255
+ // +kubebuilder:validation:Minimum=0
+ Type uint8 `json:"type"`
+}
+
+// Iterate iterates over all elements of ICMPRules.
+func (ir ICMPRules) Iterate(f func(pr Ports) error) error {
+ for i := range ir {
+ if err := f(&ir[i]); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Len returns the length of the elements of ICMPRules.
+func (ir ICMPRules) Len() int {
+ return len(ir)
+}
+
+// GetPortProtocols generates PortProtocol slice from ICMPRule and returns it.
+func (ir ICMPRule) GetPortProtocols() []PortProtocol {
+ var pps []PortProtocol
+ for _, t := range ir.Fields {
+ pp := t.PortProtocol()
+ pps = append(pps, *pp)
+ }
+ return pps
+}
+
+// GetPortRule generates PortRule from ICMPRule and returns it.
+func (ir ICMPRule) GetPortRule() *PortRule {
+ var pps []PortProtocol
+ for _, t := range ir.Fields {
+ pp := t.PortProtocol()
+ pps = append(pps, *pp)
+ }
+ pr := PortRule{
+ Ports: pps,
+ }
+ return &pr
+}
+
+// PortProtocol translates ICMPType to PortProtocol.
+func (i ICMPField) PortProtocol() *PortProtocol {
+ var proto L4Proto
+
+ typeStr := strconv.Itoa(int(i.Type))
+ if i.Family == IPv6Family {
+ proto = ProtoICMPv6
+ } else {
+ proto = ProtoICMP
+ }
+
+ pr := PortProtocol{
+ Port: typeStr,
+ Protocol: proto,
+ }
+ return &pr
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/ingress.go b/vendor/github.com/cilium/cilium/pkg/policy/api/ingress.go
new file mode 100644
index 000000000..a727d9df2
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/api/ingress.go
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package api
+
+import (
+ slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
+)
+
+// IngressCommonRule is a rule that shares some of its fields across the
+// IngressRule and IngressDenyRule. It's publicly exported so the code
+// generators can generate code for this structure.
+type IngressCommonRule struct {
+ // FromEndpoints is a list of endpoints identified by an
+ // EndpointSelector which are allowed to communicate with the endpoint
+ // subject to the rule.
+ //
+ // Example:
+ // Any endpoint with the label "role=backend" can be consumed by any
+ // endpoint carrying the label "role=frontend".
+ //
+ // +kubebuilder:validation:Optional
+ FromEndpoints []EndpointSelector `json:"fromEndpoints,omitempty"`
+
+ // FromRequires is a list of additional constraints which must be met
+ // in order for the selected endpoints to be reachable. These
+ // additional constraints do not by themselves grant access privileges and
+ // must always be accompanied with at least one matching FromEndpoints.
+ //
+ // Example:
+ // Any Endpoint with the label "team=A" requires consuming endpoint
+ // to also carry the label "team=A".
+ //
+ // +kubebuilder:validation:Optional
+ FromRequires []EndpointSelector `json:"fromRequires,omitempty"`
+
+ // FromCIDR is a list of IP blocks which the endpoint subject to the
+ // rule is allowed to receive connections from. Only connections which
+ // do *not* originate from the cluster or from the local host are subject
+ // to CIDR rules. In order to allow in-cluster connectivity, use the
+ // FromEndpoints field. This will match on the source IP address of
+ // incoming connections. Adding a prefix into FromCIDR or into
+ // FromCIDRSet with no ExcludeCIDRs is equivalent. Overlaps are
+ // allowed between FromCIDR and FromCIDRSet.
+ //
+ // Example:
+ // Any endpoint with the label "app=my-legacy-pet" is allowed to receive
+ // connections from 10.3.9.1
+ //
+ // +kubebuilder:validation:Optional
+ FromCIDR CIDRSlice `json:"fromCIDR,omitempty"`
+
+ // FromCIDRSet is a list of IP blocks which the endpoint subject to the
+ // rule is allowed to receive connections from in addition to FromEndpoints,
+ // along with a list of subnets contained within their corresponding IP block
+ // from which traffic should not be allowed.
+ // This will match on the source IP address of incoming connections. Adding
+ // a prefix into FromCIDR or into FromCIDRSet with no ExcludeCIDRs is
+ // equivalent. Overlaps are allowed between FromCIDR and FromCIDRSet.
+ //
+ // Example:
+ // Any endpoint with the label "app=my-legacy-pet" is allowed to receive
+ // connections from 10.0.0.0/8 except from IPs in subnet 10.96.0.0/12.
+ //
+ // +kubebuilder:validation:Optional
+ FromCIDRSet CIDRRuleSlice `json:"fromCIDRSet,omitempty"`
+
+ // FromEntities is a list of special entities which the endpoint subject
+ // to the rule is allowed to receive connections from. Supported entities are
+ // `world`, `cluster` and `host`
+ //
+ // +kubebuilder:validation:Optional
+ FromEntities EntitySlice `json:"fromEntities,omitempty"`
+
+ // TODO: Move this to the policy package
+ // (https://github.com/cilium/cilium/issues/8353)
+ aggregatedSelectors EndpointSelectorSlice `json:"-"`
+}
+
+// IngressRule contains all rule types which can be applied at ingress,
+// i.e. network traffic that originates outside of the endpoint and
+// is entering the endpoint selected by the endpointSelector.
+//
+// - All members of this structure are optional. If omitted or empty, the
+// member will have no effect on the rule.
+//
+// - If multiple members are set, all of them need to match in order for
+// the rule to take effect. The exception to this rule is FromRequires field;
+// the effects of any Requires field in any rule will apply to all other
+// rules as well.
+//
+// - FromEndpoints, FromCIDR, FromCIDRSet and FromEntities are mutually
+// exclusive. Only one of these members may be present within an individual
+// rule.
+type IngressRule struct {
+ IngressCommonRule `json:",inline"`
+
+ // ToPorts is a list of destination ports identified by port number and
+ // protocol which the endpoint subject to the rule is allowed to
+ // receive connections on.
+ //
+ // Example:
+ // Any endpoint with the label "app=httpd" can only accept incoming
+ // connections on port 80/tcp.
+ //
+ // +kubebuilder:validation:Optional
+ ToPorts PortRules `json:"toPorts,omitempty"`
+
+ // ICMPs is a list of ICMP rule identified by type number
+ // which the endpoint subject to the rule is allowed to
+ // receive connections on.
+ //
+ // Example:
+ // Any endpoint with the label "app=httpd" can only accept incoming
+ // type 8 ICMP connections.
+ //
+ // +kubebuilder:validation:Optional
+ ICMPs ICMPRules `json:"icmps,omitempty"`
+
+ // Authentication is the required authentication type for the allowed traffic, if any.
+ //
+ // +kubebuilder:validation:Optional
+ Authentication *Authentication `json:"authentication,omitempty"`
+}
+
+// IngressDenyRule contains all rule types which can be applied at ingress,
+// i.e. network traffic that originates outside of the endpoint and
+// is entering the endpoint selected by the endpointSelector.
+//
+// - All members of this structure are optional. If omitted or empty, the
+// member will have no effect on the rule.
+//
+// - If multiple members are set, all of them need to match in order for
+// the rule to take effect. The exception to this rule is FromRequires field;
+// the effects of any Requires field in any rule will apply to all other
+// rules as well.
+//
+// - FromEndpoints, FromCIDR, FromCIDRSet and FromEntities are mutually
+// exclusive. Only one of these members may be present within an individual
+// rule.
+type IngressDenyRule struct {
+ IngressCommonRule `json:",inline"`
+
+ // ToPorts is a list of destination ports identified by port number and
+ // protocol which the endpoint subject to the rule is not allowed to
+ // receive connections on.
+ //
+ // Example:
+ // Any endpoint with the label "app=httpd" can not accept incoming
+ // connections on port 80/tcp.
+ //
+ // +kubebuilder:validation:Optional
+ ToPorts PortDenyRules `json:"toPorts,omitempty"`
+
+ // ICMPs is a list of ICMP rule identified by type number
+ // which the endpoint subject to the rule is not allowed to
+ // receive connections on.
+ //
+ // Example:
+ // Any endpoint with the label "app=httpd" can not accept incoming
+ // type 8 ICMP connections.
+ //
+ // +kubebuilder:validation:Optional
+ ICMPs ICMPRules `json:"icmps,omitempty"`
+}
+
+// SetAggregatedSelectors creates a single slice containing all of the following
+// fields within the IngressRule, converted to EndpointSelector, to be stored
+// within the IngressRule for easy lookup while performing policy evaluation
+// for the rule:
+// * FromEntities
+// * FromCIDR
+// * FromCIDRSet
+//
+// FromEndpoints is not aggregated due to requirement folding in
+// GetSourceEndpointSelectorsWithRequirements()
+func (i *IngressCommonRule) SetAggregatedSelectors() {
+ res := make(EndpointSelectorSlice, 0, len(i.FromEntities)+len(i.FromCIDR)+len(i.FromCIDRSet))
+ res = append(res, i.FromEntities.GetAsEndpointSelectors()...)
+ res = append(res, i.FromCIDR.GetAsEndpointSelectors()...)
+ res = append(res, i.FromCIDRSet.GetAsEndpointSelectors()...)
+ // Goroutines can race setting this, but they will all compute
+ // the same result, so it does not matter.
+ i.aggregatedSelectors = res
+}
+
+// GetSourceEndpointSelectorsWithRequirements returns a slice of endpoints selectors covering
+// all L3 source selectors of the ingress rule
+func (i *IngressCommonRule) GetSourceEndpointSelectorsWithRequirements(requirements []slim_metav1.LabelSelectorRequirement) EndpointSelectorSlice {
+ if i.aggregatedSelectors == nil {
+ i.SetAggregatedSelectors()
+ }
+ res := make(EndpointSelectorSlice, 0, len(i.FromEndpoints)+len(i.aggregatedSelectors))
+ if len(requirements) > 0 && len(i.FromEndpoints) > 0 {
+ for idx := range i.FromEndpoints {
+ sel := *i.FromEndpoints[idx].DeepCopy()
+ sel.MatchExpressions = append(sel.MatchExpressions, requirements...)
+ sel.SyncRequirementsWithLabelSelector()
+ // Even though this string is deep copied, we need to override it
+ // because we are updating the contents of the MatchExpressions.
+ sel.cachedLabelSelectorString = sel.LabelSelector.String()
+ res = append(res, sel)
+ }
+ } else {
+ res = append(res, i.FromEndpoints...)
+ }
+
+ return append(res, i.aggregatedSelectors...)
+}
+
+// AllowsWildcarding returns true if wildcarding should be performed upon
+// policy evaluation for the given rule.
+func (i *IngressCommonRule) AllowsWildcarding() bool {
+ return len(i.FromRequires) == 0
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/l4.go b/vendor/github.com/cilium/cilium/pkg/policy/api/l4.go
new file mode 100644
index 000000000..d41adf1b6
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/api/l4.go
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package api
+
+import (
+ "github.com/cilium/proxy/pkg/policy/api/kafka"
+)
+
+// L4Proto is a layer 4 protocol name
+type L4Proto string
+
+const (
+ // Keep pkg/u8proto up-to-date with any additions here
+
+ ProtoTCP L4Proto = "TCP"
+ ProtoUDP L4Proto = "UDP"
+ ProtoSCTP L4Proto = "SCTP"
+ ProtoICMP L4Proto = "ICMP"
+ ProtoICMPv6 L4Proto = "ICMPV6"
+ ProtoAny L4Proto = "ANY"
+
+ PortProtocolAny = "0/ANY"
+)
+
+// PortProtocol specifies an L4 port with an optional transport protocol
+type PortProtocol struct {
+ // Port is an L4 port number. For now the string will be strictly
+ // parsed as a single uint16. In the future, this field may support
+ // ranges in the form "1024-2048
+ // Port can also be a port name, which must contain at least one [a-z],
+ // and may also contain [0-9] and '-' anywhere except adjacent to another
+ // '-' or in the beginning or the end.
+ //
+ // +kubebuilder:validation:Pattern=`^(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|6[0-4][0-9]{3}|[1-5][0-9]{4}|[0-9]{1,4})|([a-zA-Z0-9]-?)*[a-zA-Z](-?[a-zA-Z0-9])*$`
+ Port string `json:"port"`
+
+ // Protocol is the L4 protocol. If omitted or empty, any protocol
+ // matches. Accepted values: "TCP", "UDP", "SCTP", "ANY"
+ //
+ // Matching on ICMP is not supported.
+ //
+ // Named port specified for a container may narrow this down, but may not
+ // contradict this.
+ //
+ // +kubebuilder:validation:Enum=TCP;UDP;SCTP;ANY
+ // +kubebuilder:validation:Optional
+ Protocol L4Proto `json:"protocol,omitempty"`
+}
+
+// Covers returns true if the ports and protocol specified in the received
+// PortProtocol are equal to or a superset of the ports and protocol in 'other'.
+// Named ports only cover other named ports with exactly the same name.
+func (p PortProtocol) Covers(other PortProtocol) bool {
+ if p.Port != other.Port {
+ return false
+ }
+ if p.Protocol != other.Protocol {
+ return p.Protocol == "" || p.Protocol == ProtoAny
+ }
+ return true
+}
+
+// Secret is a reference to a secret, backed by k8s or local file system.
+type Secret struct {
+ // Namespace is the namespace in which the secret exists. Context of use
+ // determines the default value if left out (e.g., "default").
+ //
+ // +kubebuilder:validation:Optional
+ Namespace string `json:"namespace,omitempty"`
+
+ // Name is the name of the secret.
+ //
+ // +kubebuilder:validation:Required
+ Name string `json:"name"`
+}
+
+// TLSContext provides TLS configuration via reference to either k8s secrets
+// or via filepath. If both are set, directory is given priority over
+// k8sSecrets.
+type TLSContext struct {
+ // Secret is the secret that contains the certificates and private key for
+ // the TLS context.
+ // By default, Cilium will search in this secret for the following items:
+ // - 'ca.crt' - Which represents the trusted CA to verify remote source.
+ // - 'tls.crt' - Which represents the public key certificate.
+ // - 'tls.key' - Which represents the private key matching the public key
+ // certificate.
+ //
+ // +kubebuilder:validation:Required
+ Secret *Secret `json:"secret"`
+
+ // TrustedCA is the file name or k8s secret item name for the trusted CA.
+ // If omitted, 'ca.crt' is assumed, if it exists. If given, the item must
+ // exist.
+ //
+ // +kubebuilder:validation:Optional
+ TrustedCA string `json:"trustedCA,omitempty"`
+
+ // Certificate is the file name or k8s secret item name for the certificate
+ // chain. If omitted, 'tls.crt' is assumed, if it exists. If given, the
+ // item must exist.
+ //
+ // +kubebuilder:validation:Optional
+ Certificate string `json:"certificate,omitempty"`
+
+ // PrivateKey is the file name or k8s secret item name for the private key
+ // matching the certificate chain. If omitted, 'tls.key' is assumed, if it
+ // exists. If given, the item must exist.
+ //
+ // +kubebuilder:validation:Optional
+ PrivateKey string `json:"privateKey,omitempty"`
+}
+
+// EnvoyConfig defines a reference to a CiliumEnvoyConfig or CiliumClusterwideEnvoyConfig
+type EnvoyConfig struct {
+ // Kind is the resource type being referred to. Defaults to CiliumEnvoyConfig or
+ // CiliumClusterwideEnvoyConfig for CiliumNetworkPolicy and CiliumClusterwideNetworkPolicy,
+ // respectively. The only case this is currently explicitly needed is when referring to a
+ // CiliumClusterwideEnvoyConfig from CiliumNetworkPolicy, as using a namespaced listener
+ // from a cluster scoped policy is not allowed.
+ //
+ // +kubebuilder:validation:Enum=CiliumEnvoyConfig;CiliumClusterwideEnvoyConfig
+ // +kubebuilder:validation:Optional
+ Kind string `json:"kind"`
+
+ // Name is the resource name of the CiliumEnvoyConfig or CiliumClusterwideEnvoyConfig where
+ // the listener is defined in.
+ //
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:Required
+ Name string `json:"name"`
+}
+
+// Listener defines a reference to an Envoy listener specified in a CEC or CCEC resource.
+type Listener struct {
+ // EnvoyConfig is a reference to the CEC or CCNP resource in which
+ // the listener is defined.
+ //
+ // +kubebuilder:validation:Required
+ EnvoyConfig *EnvoyConfig `json:"envoyConfig"`
+
+ // Name is the name of the listener.
+ //
+ // +kubebuilder:validation:MinLength=1
+ // +kubebuilder:validation:Required
+ Name string `json:"name"`
+}
+
+// PortRule is a list of ports/protocol combinations with optional Layer 7
+// rules which must be met.
+type PortRule struct {
+ // Ports is a list of L4 port/protocol
+ //
+ // +kubebuilder:validation:Optional
+ Ports []PortProtocol `json:"ports,omitempty"`
+
+ // TerminatingTLS is the TLS context for the connection terminated by
+ // the L7 proxy. For egress policy this specifies the server-side TLS
+ // parameters to be applied on the connections originated from the local
+ // endpoint and terminated by the L7 proxy. For ingress policy this specifies
+ // the server-side TLS parameters to be applied on the connections
+ // originated from a remote source and terminated by the L7 proxy.
+ //
+ // +kubebuilder:validation:Optional
+ TerminatingTLS *TLSContext `json:"terminatingTLS,omitempty"`
+
+ // OriginatingTLS is the TLS context for the connections originated by
+ // the L7 proxy. For egress policy this specifies the client-side TLS
+ // parameters for the upstream connection originating from the L7 proxy
+ // to the remote destination. For ingress policy this specifies the
+ // client-side TLS parameters for the connection from the L7 proxy to
+ // the local endpoint.
+ //
+ // +kubebuilder:validation:Optional
+ OriginatingTLS *TLSContext `json:"originatingTLS,omitempty"`
+
+ // ServerNames is a list of allowed TLS SNI values. If not empty, then
+ // TLS must be present and one of the provided SNIs must be indicated in the
+ // TLS handshake.
+ //
+ // +kubebuilder:validation:Optional
+ ServerNames []string `json:"serverNames,omitempty"`
+
+ // listener specifies the name of a custom Envoy listener to which this traffic should be
+ // redirected to.
+ //
+ // +kubebuilder:validation:Optional
+ Listener *Listener `json:"listener,omitempty"`
+
+ // Rules is a list of additional port level rules which must be met in
+ // order for the PortRule to allow the traffic. If omitted or empty,
+ // no layer 7 rules are enforced.
+ //
+ // +kubebuilder:validation:Optional
+ Rules *L7Rules `json:"rules,omitempty"`
+}
+
+// GetPortProtocols returns the Ports field of the PortRule.
+func (pd PortRule) GetPortProtocols() []PortProtocol {
+ return pd.Ports
+}
+
+// GetPortRule returns the PortRule.
+func (pd *PortRule) GetPortRule() *PortRule {
+ return pd
+}
+
+// PortDenyRule is a list of ports/protocol that should be used for deny
+// policies. This structure lacks the L7Rules since it's not supported in deny
+// policies.
+type PortDenyRule struct {
+ // Ports is a list of L4 port/protocol
+ //
+ // +kubebuilder:validation:Optional
+ Ports []PortProtocol `json:"ports,omitempty"`
+}
+
+// GetPortProtocols returns the Ports field of the PortDenyRule.
+func (pd PortDenyRule) GetPortProtocols() []PortProtocol {
+ return pd.Ports
+}
+
+// GetPortRule returns nil has it is not a PortRule.
+func (pd *PortDenyRule) GetPortRule() *PortRule {
+ return nil
+}
+
+// L7Rules is a union of port level rule types. Mixing of different port
+// level rule types is disallowed, so exactly one of the following must be set.
+// If none are specified, then no additional port level rules are applied.
+type L7Rules struct {
+ // HTTP specific rules.
+ //
+ // +kubebuilder:validation:Optional
+ HTTP []PortRuleHTTP `json:"http,omitempty"`
+
+ // Kafka-specific rules.
+ //
+ // +kubebuilder:validation:Optional
+ Kafka []kafka.PortRule `json:"kafka,omitempty"`
+
+ // DNS-specific rules.
+ //
+ // +kubebuilder:validation:Optional
+ DNS []PortRuleDNS `json:"dns,omitempty"`
+
+ // Name of the L7 protocol for which the Key-value pair rules apply.
+ //
+ // +kubebuilder:validation:Optional
+ L7Proto string `json:"l7proto,omitempty"`
+
+ // Key-value pair rules.
+ //
+ // +kubebuilder:validation:Optional
+ L7 []PortRuleL7 `json:"l7,omitempty"`
+}
+
+// Len returns the total number of rules inside `L7Rules`.
+// Returns 0 if nil.
+func (rules *L7Rules) Len() int {
+ if rules == nil {
+ return 0
+ }
+ return len(rules.HTTP) + len(rules.Kafka) + len(rules.DNS) + len(rules.L7)
+}
+
+// IsEmpty returns whether the `L7Rules` is nil or contains no rules.
+func (rules *L7Rules) IsEmpty() bool {
+ return rules.Len() == 0
+}
+
+// PortRules is a slice of PortRule.
+type PortRules []PortRule
+
+// Iterate iterates over all elements of PortRules.
+func (pr PortRules) Iterate(f func(pr Ports) error) error {
+ for i := range pr {
+ err := f(&pr[i])
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Len returns the length of the elements of PortRules.
+func (pr PortRules) Len() int {
+ return len(pr)
+}
+
+// PortDenyRules is a slice of PortDenyRule.
+type PortDenyRules []PortDenyRule
+
+// Iterate iterates over all elements of PortDenyRules.
+func (pr PortDenyRules) Iterate(f func(pr Ports) error) error {
+ for i := range pr {
+ err := f(&pr[i])
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Len returns the length of the elements of PortDenyRules.
+func (pr PortDenyRules) Len() int {
+ return len(pr)
+}
+
+// Ports is an interface that should be used by all implementations of the
+// PortProtocols.
+type Ports interface {
+ // GetPortProtocols returns the slice PortProtocol
+ GetPortProtocols() []PortProtocol
+ // GetPortRule returns a PortRule, if the implementation does not support
+ // it, then returns nil.
+ GetPortRule() *PortRule
+}
+
+// PortsIterator is an interface that should be implemented by structures that
+// can iterate over a list of Ports interfaces.
+type PortsIterator interface {
+ Iterate(f func(pr Ports) error) error
+ Len() int
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/l7.go b/vendor/github.com/cilium/cilium/pkg/policy/api/l7.go
new file mode 100644
index 000000000..ba1e924f0
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/api/l7.go
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package api
+
+import (
+ "fmt"
+)
+
+// PortRuleL7 is a list of key-value pairs interpreted by a L7 protocol as
+// protocol constraints. All fields are optional, if all fields are empty or
+// missing, the rule does not have any effect.
+type PortRuleL7 map[string]string
+
+// Sanitize sanitizes key-value pair rules. It makes sure keys are present.
+func (rule *PortRuleL7) Sanitize() error {
+ for k := range *rule {
+ if k == "" {
+ return fmt.Errorf("Empty key not allowed")
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/rule.go b/vendor/github.com/cilium/cilium/pkg/policy/api/rule.go
new file mode 100644
index 000000000..f0224b1f4
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/api/rule.go
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package api
+
+import (
+ "context"
+ "encoding/json"
+
+ "github.com/cilium/cilium/pkg/labels"
+)
+
+// AuthenticationMode is a string identifying a supported authentication type
+type AuthenticationMode string
+
+const (
+ AuthenticationModeDisabled AuthenticationMode = "disabled" // Always succeeds
+ AuthenticationModeRequired AuthenticationMode = "required" // Mutual TLS with SPIFFE as certificate provider by default
+ AuthenticationModeAlwaysFail AuthenticationMode = "test-always-fail"
+)
+
+// Authentication specifies the kind of cryptographic authentication required for the traffic to
+// be allowed.
+type Authentication struct {
+ // Mode is the required authentication mode for the allowed traffic, if any.
+ //
+ // +kubebuilder:validation:Enum=disabled;required;test-always-fail
+ // +kubebuilder:validation:Required
+ Mode AuthenticationMode `json:"mode"`
+}
+
+// Rule is a policy rule which must be applied to all endpoints which match the
+// labels contained in the endpointSelector
+//
+// Each rule is split into an ingress section which contains all rules
+// applicable at ingress, and an egress section applicable at egress. For rule
+// types such as `L4Rule` and `CIDR` which can be applied at both ingress and
+// egress, both ingress and egress side have to either specifically allow the
+// connection or one side has to be omitted.
+//
+// Either ingress, egress, or both can be provided. If both ingress and egress
+// are omitted, the rule has no effect.
+//
+// +deepequal-gen:private-method=true
+type Rule struct {
+ // EndpointSelector selects all endpoints which should be subject to
+ // this rule. EndpointSelector and NodeSelector cannot be both empty and
+ // are mutually exclusive.
+ //
+ // +kubebuilder:validation:OneOf
+ EndpointSelector EndpointSelector `json:"endpointSelector,omitempty"`
+
+ // NodeSelector selects all nodes which should be subject to this rule.
+ // EndpointSelector and NodeSelector cannot be both empty and are mutually
+ // exclusive. Can only be used in CiliumClusterwideNetworkPolicies.
+ //
+ // +kubebuilder:validation:OneOf
+ NodeSelector EndpointSelector `json:"nodeSelector,omitempty"`
+
+ // Ingress is a list of IngressRule which are enforced at ingress.
+ // If omitted or empty, this rule does not apply at ingress.
+ //
+ // +kubebuilder:validation:Optional
+ Ingress []IngressRule `json:"ingress,omitempty"`
+
+ // IngressDeny is a list of IngressDenyRule which are enforced at ingress.
+ // Any rule inserted here will be denied regardless of the allowed ingress
+ // rules in the 'ingress' field.
+ // If omitted or empty, this rule does not apply at ingress.
+ //
+ // +kubebuilder:validation:Optional
+ IngressDeny []IngressDenyRule `json:"ingressDeny,omitempty"`
+
+ // Egress is a list of EgressRule which are enforced at egress.
+ // If omitted or empty, this rule does not apply at egress.
+ //
+ // +kubebuilder:validation:Optional
+ Egress []EgressRule `json:"egress,omitempty"`
+
+ // EgressDeny is a list of EgressDenyRule which are enforced at egress.
+ // Any rule inserted here will be denied regardless of the allowed egress
+ // rules in the 'egress' field.
+ // If omitted or empty, this rule does not apply at egress.
+ //
+ // +kubebuilder:validation:Optional
+ EgressDeny []EgressDenyRule `json:"egressDeny,omitempty"`
+
+ // Labels is a list of optional strings which can be used to
+ // re-identify the rule or to store metadata. It is possible to lookup
+ // or delete strings based on labels. Labels are not required to be
+ // unique, multiple rules can have overlapping or identical labels.
+ //
+ // +kubebuilder:validation:Optional
+ Labels labels.LabelArray `json:"labels,omitempty"`
+
+ // Description is a free form string, it can be used by the creator of
+ // the rule to store human readable explanation of the purpose of this
+ // rule. Rules cannot be identified by comment.
+ //
+ // +kubebuilder:validation:Optional
+ Description string `json:"description,omitempty"`
+}
+
+// MarshalJSON returns the JSON encoding of Rule r. We need to overwrite it to
+// enforce omitempty on the EndpointSelector nested structures.
+func (r *Rule) MarshalJSON() ([]byte, error) {
+ type common struct {
+ Ingress []IngressRule `json:"ingress,omitempty"`
+ IngressDeny []IngressDenyRule `json:"ingressDeny,omitempty"`
+ Egress []EgressRule `json:"egress,omitempty"`
+ EgressDeny []EgressDenyRule `json:"egressDeny,omitempty"`
+ Labels labels.LabelArray `json:"labels,omitempty"`
+ Description string `json:"description,omitempty"`
+ }
+
+ var a interface{}
+ ruleCommon := common{
+ Ingress: r.Ingress,
+ IngressDeny: r.IngressDeny,
+ Egress: r.Egress,
+ EgressDeny: r.EgressDeny,
+ Labels: r.Labels,
+ Description: r.Description,
+ }
+
+ // Only one of endpointSelector or nodeSelector is permitted.
+ switch {
+ case r.EndpointSelector.LabelSelector != nil:
+ a = struct {
+ EndpointSelector EndpointSelector `json:"endpointSelector,omitempty"`
+ common
+ }{
+ EndpointSelector: r.EndpointSelector,
+ common: ruleCommon,
+ }
+ case r.NodeSelector.LabelSelector != nil:
+ a = struct {
+ NodeSelector EndpointSelector `json:"nodeSelector,omitempty"`
+ common
+ }{
+ NodeSelector: r.NodeSelector,
+ common: ruleCommon,
+ }
+ }
+
+ return json.Marshal(a)
+}
+
+func (r *Rule) DeepEqual(o *Rule) bool {
+ switch {
+ case (r == nil) != (o == nil):
+ return false
+ case (r == nil) && (o == nil):
+ return true
+ }
+ return r.deepEqual(o)
+}
+
+// NewRule builds a new rule with no selector and no policy.
+func NewRule() *Rule {
+ return &Rule{}
+}
+
+// WithEndpointSelector configures the Rule with the specified selector.
+func (r *Rule) WithEndpointSelector(es EndpointSelector) *Rule {
+ r.EndpointSelector = es
+ return r
+}
+
+// WithIngressRules configures the Rule with the specified rules.
+func (r *Rule) WithIngressRules(rules []IngressRule) *Rule {
+ r.Ingress = rules
+ return r
+}
+
+// WithIngressDenyRules configures the Rule with the specified rules.
+func (r *Rule) WithIngressDenyRules(rules []IngressDenyRule) *Rule {
+ r.IngressDeny = rules
+ return r
+}
+
+// WithEgressRules configures the Rule with the specified rules.
+func (r *Rule) WithEgressRules(rules []EgressRule) *Rule {
+ r.Egress = rules
+ return r
+}
+
+// WithEgressDenyRules configures the Rule with the specified rules.
+func (r *Rule) WithEgressDenyRules(rules []EgressDenyRule) *Rule {
+ r.EgressDeny = rules
+ return r
+}
+
+// WithLabels configures the Rule with the specified labels metadata.
+func (r *Rule) WithLabels(labels labels.LabelArray) *Rule {
+ r.Labels = labels
+ return r
+}
+
+// WithDescription configures the Rule with the specified description metadata.
+func (r *Rule) WithDescription(desc string) *Rule {
+ r.Description = desc
+ return r
+}
+
+// RequiresDerivative it return true if the rule has a derivative rule.
+func (r *Rule) RequiresDerivative() bool {
+ for _, rule := range r.Egress {
+ if rule.RequiresDerivative() {
+ return true
+ }
+ }
+ for _, rule := range r.EgressDeny {
+ if rule.RequiresDerivative() {
+ return true
+ }
+ }
+ return false
+}
+
+// CreateDerivative will return a new Rule with the new data based gather
+// by the rules that autogenerated new Rule
+func (r *Rule) CreateDerivative(ctx context.Context) (*Rule, error) {
+ newRule := r.DeepCopy()
+ newRule.Egress = []EgressRule{}
+ newRule.EgressDeny = []EgressDenyRule{}
+
+ for _, egressRule := range r.Egress {
+ derivativeEgressRule, err := egressRule.CreateDerivative(ctx)
+ if err != nil {
+ return newRule, err
+ }
+ newRule.Egress = append(newRule.Egress, *derivativeEgressRule)
+ }
+
+ for _, egressDenyRule := range r.EgressDeny {
+ derivativeEgressDenyRule, err := egressDenyRule.CreateDerivative(ctx)
+ if err != nil {
+ return newRule, err
+ }
+ newRule.EgressDeny = append(newRule.EgressDeny, *derivativeEgressDenyRule)
+ }
+ return newRule, nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/rule_validation.go b/vendor/github.com/cilium/cilium/pkg/policy/api/rule_validation.go
new file mode 100644
index 000000000..e012dc5ed
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/api/rule_validation.go
@@ -0,0 +1,508 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package api
+
+import (
+ "errors"
+ "fmt"
+ "net/netip"
+ "strconv"
+ "strings"
+
+ "github.com/cilium/cilium/pkg/iana"
+ "github.com/cilium/cilium/pkg/option"
+)
+
+const (
+ maxPorts = 40
+ maxICMPFields = 40
+)
+
+type exists struct{}
+
+// Sanitize validates and sanitizes a policy rule. Minor edits such as
+// capitalization of the protocol name are automatically fixed up. More
+// fundamental violations will cause an error to be returned.
+func (r Rule) Sanitize() error {
+ if r.EndpointSelector.LabelSelector == nil && r.NodeSelector.LabelSelector == nil {
+ return fmt.Errorf("rule must have one of EndpointSelector or NodeSelector")
+ }
+ if r.EndpointSelector.LabelSelector != nil && r.NodeSelector.LabelSelector != nil {
+ return fmt.Errorf("rule cannot have both EndpointSelector and NodeSelector")
+ }
+
+ if r.EndpointSelector.LabelSelector != nil {
+ if err := r.EndpointSelector.sanitize(); err != nil {
+ return err
+ }
+ }
+
+ var hostPolicy bool
+ if r.NodeSelector.LabelSelector != nil {
+ if err := r.NodeSelector.sanitize(); err != nil {
+ return err
+ }
+ hostPolicy = true
+ }
+
+ for i := range r.Ingress {
+ if err := r.Ingress[i].sanitize(); err != nil {
+ return err
+ }
+ if hostPolicy {
+ if len(countL7Rules(r.Ingress[i].ToPorts)) > 0 {
+ return fmt.Errorf("host policies do not support L7 rules yet")
+ }
+ }
+ }
+
+ for i := range r.Egress {
+ if err := r.Egress[i].sanitize(); err != nil {
+ return err
+ }
+ if hostPolicy {
+ if len(countL7Rules(r.Egress[i].ToPorts)) > 0 {
+ return fmt.Errorf("host policies do not support L7 rules yet")
+ }
+ }
+ }
+
+ return nil
+}
+
+func countL7Rules(ports []PortRule) map[string]int {
+ result := make(map[string]int)
+ for _, port := range ports {
+ if !port.Rules.IsEmpty() {
+ result["DNS"] += len(port.Rules.DNS)
+ result["HTTP"] += len(port.Rules.HTTP)
+ result["Kafka"] += len(port.Rules.Kafka)
+ }
+ }
+ return result
+}
+
+func (i *IngressRule) sanitize() error {
+ l3Members := map[string]int{
+ "FromEndpoints": len(i.FromEndpoints),
+ "FromCIDR": len(i.FromCIDR),
+ "FromCIDRSet": len(i.FromCIDRSet),
+ "FromEntities": len(i.FromEntities),
+ }
+ l7Members := countL7Rules(i.ToPorts)
+ l7IngressSupport := map[string]bool{
+ "DNS": false,
+ "Kafka": true,
+ "HTTP": true,
+ }
+
+ for m1 := range l3Members {
+ for m2 := range l3Members {
+ if m2 != m1 && l3Members[m1] > 0 && l3Members[m2] > 0 {
+ return fmt.Errorf("Combining %s and %s is not supported yet", m1, m2)
+ }
+ }
+ }
+
+ if len(l7Members) > 0 && !option.Config.EnableL7Proxy {
+ return errors.New("L7 policy is not supported since L7 proxy is not enabled")
+ }
+ for member := range l7Members {
+ if l7Members[member] > 0 && !l7IngressSupport[member] {
+ return fmt.Errorf("L7 protocol %s is not supported on ingress yet", member)
+ }
+ }
+
+ if len(i.ICMPs) > 0 && !option.Config.EnableICMPRules {
+ return fmt.Errorf("ICMP rules can only be applied when the %q flag is set", option.EnableICMPRules)
+ }
+
+ if len(i.ICMPs) > 0 && len(i.ToPorts) > 0 {
+ return fmt.Errorf("The ICMPs block may only be present without ToPorts. Define a separate rule to use ToPorts.")
+ }
+
+ for _, es := range i.FromEndpoints {
+ if err := es.sanitize(); err != nil {
+ return err
+ }
+ }
+
+ for _, es := range i.FromRequires {
+ if err := es.sanitize(); err != nil {
+ return err
+ }
+ }
+
+ for n := range i.ToPorts {
+ if err := i.ToPorts[n].sanitize(true); err != nil {
+ return err
+ }
+ }
+
+ for n := range i.ICMPs {
+ if err := i.ICMPs[n].verify(); err != nil {
+ return err
+ }
+ }
+
+ prefixLengths := map[int]exists{}
+ for n := range i.FromCIDR {
+ prefixLength, err := i.FromCIDR[n].sanitize()
+ if err != nil {
+ return err
+ }
+ prefixLengths[prefixLength] = exists{}
+ }
+
+ for n := range i.FromCIDRSet {
+ prefixLength, err := i.FromCIDRSet[n].sanitize()
+ if err != nil {
+ return err
+ }
+ prefixLengths[prefixLength] = exists{}
+ }
+
+ for _, fromEntity := range i.FromEntities {
+ _, ok := EntitySelectorMapping[fromEntity]
+ if !ok {
+ return fmt.Errorf("unsupported entity: %s", fromEntity)
+ }
+ }
+
+ i.SetAggregatedSelectors()
+
+ return nil
+}
+
+func (e *EgressRule) sanitize() error {
+ l3Members := map[string]int{
+ "ToCIDR": len(e.ToCIDR),
+ "ToCIDRSet": len(e.ToCIDRSet),
+ "ToEndpoints": len(e.ToEndpoints),
+ "ToEntities": len(e.ToEntities),
+ "ToServices": len(e.ToServices),
+ "ToFQDNs": len(e.ToFQDNs),
+ "ToGroups": len(e.ToGroups),
+ }
+ l3DependentL4Support := map[interface{}]bool{
+ "ToCIDR": true,
+ "ToCIDRSet": true,
+ "ToEndpoints": true,
+ "ToEntities": true,
+ "ToServices": false, // see https://github.com/cilium/cilium/issues/20067
+ "ToFQDNs": true,
+ "ToGroups": true,
+ }
+ l7Members := countL7Rules(e.ToPorts)
+ l7EgressSupport := map[string]bool{
+ "DNS": true,
+ "Kafka": true,
+ "HTTP": true,
+ }
+
+ for m1 := range l3Members {
+ for m2 := range l3Members {
+ if m2 != m1 && l3Members[m1] > 0 && l3Members[m2] > 0 {
+ return fmt.Errorf("Combining %s and %s is not supported yet", m1, m2)
+ }
+ }
+ }
+ for member := range l3Members {
+ if l3Members[member] > 0 && len(e.ToPorts) > 0 && !l3DependentL4Support[member] {
+ return fmt.Errorf("Combining %s and ToPorts is not supported yet", member)
+ }
+ }
+
+ if len(l7Members) > 0 && !option.Config.EnableL7Proxy {
+ return errors.New("L7 policy is not supported since L7 proxy is not enabled")
+ }
+ for member := range l7Members {
+ if l7Members[member] > 0 && !l7EgressSupport[member] {
+ return fmt.Errorf("L7 protocol %s is not supported on egress yet", member)
+ }
+ }
+
+ if len(e.ICMPs) > 0 && !option.Config.EnableICMPRules {
+ return fmt.Errorf("ICMP rules can only be applied when the %q flag is set", option.EnableICMPRules)
+ }
+
+ if len(e.ICMPs) > 0 && len(e.ToPorts) > 0 {
+ return fmt.Errorf("The ICMPs block may only be present without ToPorts. Define a separate rule to use ToPorts.")
+ }
+
+ for _, es := range e.ToEndpoints {
+ if err := es.sanitize(); err != nil {
+ return err
+ }
+ }
+
+ for _, es := range e.ToRequires {
+ if err := es.sanitize(); err != nil {
+ return err
+ }
+ }
+
+ for i := range e.ToPorts {
+ if err := e.ToPorts[i].sanitize(false); err != nil {
+ return err
+ }
+ }
+
+ for n := range e.ICMPs {
+ if err := e.ICMPs[n].verify(); err != nil {
+ return err
+ }
+ }
+
+ prefixLengths := map[int]exists{}
+ for i := range e.ToCIDR {
+ prefixLength, err := e.ToCIDR[i].sanitize()
+ if err != nil {
+ return err
+ }
+ prefixLengths[prefixLength] = exists{}
+ }
+ for i := range e.ToCIDRSet {
+ prefixLength, err := e.ToCIDRSet[i].sanitize()
+ if err != nil {
+ return err
+ }
+ prefixLengths[prefixLength] = exists{}
+ }
+
+ for _, toEntity := range e.ToEntities {
+ _, ok := EntitySelectorMapping[toEntity]
+ if !ok {
+ return fmt.Errorf("unsupported entity: %s", toEntity)
+ }
+ }
+
+ for i := range e.ToFQDNs {
+ err := e.ToFQDNs[i].sanitize()
+ if err != nil {
+ return err
+ }
+ }
+
+ e.SetAggregatedSelectors()
+
+ return nil
+}
+
+func (pr *L7Rules) sanitize(ports []PortProtocol) error {
+ nTypes := 0
+
+ if pr.HTTP != nil {
+ nTypes++
+ for i := range pr.HTTP {
+ if err := pr.HTTP[i].Sanitize(); err != nil {
+ return err
+ }
+ }
+ }
+
+ if pr.Kafka != nil {
+ nTypes++
+ for i := range pr.Kafka {
+ if err := pr.Kafka[i].Sanitize(); err != nil {
+ return err
+ }
+ }
+ }
+
+ if pr.DNS != nil {
+ // Forthcoming TPROXY redirection restricts DNS proxy to the standard DNS port (53).
+ // Require the port 53 be explicitly configured, and disallow other port numbers.
+ if len(ports) == 0 {
+ return fmt.Errorf("Port 53 must be specified for DNS rules")
+ }
+
+ nTypes++
+ for i := range pr.DNS {
+ if err := pr.DNS[i].Sanitize(); err != nil {
+ return err
+ }
+ }
+ }
+
+ if pr.L7 != nil && pr.L7Proto == "" {
+ return fmt.Errorf("'l7' may only be specified when a 'l7proto' is also specified")
+ }
+ if pr.L7Proto != "" {
+ nTypes++
+ for i := range pr.L7 {
+ if err := pr.L7[i].Sanitize(); err != nil {
+ return err
+ }
+ }
+ }
+
+ if nTypes > 1 {
+ return fmt.Errorf("multiple L7 protocol rule types specified in single rule")
+ }
+ return nil
+}
+
+func (pr *PortRule) sanitize(ingress bool) error {
+ hasDNSRules := pr.Rules != nil && len(pr.Rules.DNS) > 0
+ if ingress && hasDNSRules {
+ return fmt.Errorf("DNS rules are not allowed on ingress")
+ }
+
+ if len(pr.ServerNames) > 0 && !pr.Rules.IsEmpty() && pr.TerminatingTLS == nil {
+ return fmt.Errorf("ServerNames are not allowed with L7 rules without TLS termination")
+ }
+ for _, sn := range pr.ServerNames {
+ if sn == "" {
+ return fmt.Errorf("Empty server name is not allowed")
+ }
+ }
+
+ if len(pr.Ports) > maxPorts {
+ return fmt.Errorf("too many ports, the max is %d", maxPorts)
+ }
+ haveZeroPort := false
+ for i := range pr.Ports {
+ var isZero bool
+ var err error
+ if isZero, err = pr.Ports[i].sanitize(); err != nil {
+ return err
+ }
+ if isZero {
+ haveZeroPort = true
+ }
+ // DNS L7 rules can be TCP, UDP or ANY, all others are TCP only.
+ switch {
+ case pr.Rules.IsEmpty(), hasDNSRules:
+ // nothing to do if no rules OR they are DNS rules (note the comma above)
+ case pr.Ports[i].Protocol != ProtoTCP:
+ return fmt.Errorf("L7 rules can only apply to TCP (not %s) except for DNS rules", pr.Ports[i].Protocol)
+ }
+ }
+
+ listener := pr.Listener
+ if listener != nil {
+ // For now we have only tested custom listener support on the egress path. TODO
+ // (jrajahalme): Lift this limitation in follow-up work once proper testing has been
+ // done on the ingress path.
+ if ingress {
+ return fmt.Errorf("Listener is not allowed on ingress (%s)", listener.Name)
+ }
+ // There is no quarantee that Listener will support Cilium policy enforcement. Even
+ // now proxylib-based enforcement (e.g, Kafka) may work, but has not been tested.
+ // TODO (jrajahalme): Lift this limitation in follow-up work for proxylib based
+ // parsers if needed and when tested.
+ if !pr.Rules.IsEmpty() {
+ return fmt.Errorf("Listener is not allowed with L7 rules (%s)", listener.Name)
+ }
+ }
+
+ // Sanitize L7 rules
+ if !pr.Rules.IsEmpty() {
+ if haveZeroPort {
+ return fmt.Errorf("L7 rules can not be used when a port is 0")
+ }
+
+ if err := pr.Rules.sanitize(pr.Ports); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (pp *PortProtocol) sanitize() (isZero bool, err error) {
+ if pp.Port == "" {
+ return isZero, fmt.Errorf("Port must be specified")
+ }
+
+ // Port names are formatted as IANA Service Names. This means that
+ // some legal numeric literals are no longer considered numbers, e.g,
+ // 0x10 is now considered a name rather than number 16.
+ if iana.IsSvcName(pp.Port) {
+ pp.Port = strings.ToLower(pp.Port) // Normalize for case insensitive comparison
+ } else {
+ p, err := strconv.ParseUint(pp.Port, 0, 16)
+ if err != nil {
+ return isZero, fmt.Errorf("Unable to parse port: %s", err)
+ }
+ isZero = p == 0
+ }
+
+ pp.Protocol, err = ParseL4Proto(string(pp.Protocol))
+ return isZero, err
+}
+
+func (ir *ICMPRule) verify() error {
+ if len(ir.Fields) > maxICMPFields {
+ return fmt.Errorf("too many types, the max is %d", maxICMPFields)
+ }
+
+ for _, f := range ir.Fields {
+ if f.Family != IPv4Family && f.Family != IPv6Family && f.Family != "" {
+ return fmt.Errorf("wrong family: %s", f.Family)
+ }
+ }
+
+ return nil
+}
+
+// sanitize the given CIDR. If successful, returns the prefixLength specified
+// in the cidr and nil. Otherwise, returns (0, nil).
+func (c CIDR) sanitize() (prefixLength int, err error) {
+ strCIDR := string(c)
+ if strCIDR == "" {
+ return 0, fmt.Errorf("IP must be specified")
+ }
+
+ prefix, err := netip.ParsePrefix(strCIDR)
+ if err != nil {
+ _, err := netip.ParseAddr(strCIDR)
+ if err != nil {
+ return 0, fmt.Errorf("unable to parse CIDR: %s", err)
+ }
+ return prefixLength, nil
+ }
+ prefixLength = prefix.Bits()
+ if prefixLength < 0 {
+ return 0, fmt.Errorf("CIDR cannot specify non-contiguous mask %s", prefix)
+ }
+
+ return prefixLength, nil
+}
+
+// sanitize validates a CIDRRule by checking that the CIDR prefix itself is
+// valid, and ensuring that all of the exception CIDR prefixes are contained
+// within the allowed CIDR prefix.
+func (c *CIDRRule) sanitize() (prefixLength int, err error) {
+ // Only allow notation /. Note that this differs from
+ // the logic in api.CIDR.Sanitize().
+ prefix, err := netip.ParsePrefix(string(c.Cidr))
+ if err != nil {
+ return 0, fmt.Errorf("Unable to parse CIDRRule %q: %s", c.Cidr, err)
+ }
+
+ prefixLength = prefix.Bits()
+ if prefixLength < 0 {
+ return 0, fmt.Errorf("CIDR cannot specify non-contiguous mask %s", prefix)
+ }
+
+ // Ensure that each provided exception CIDR prefix is formatted correctly,
+ // and is contained within the CIDR prefix to/from which we want to allow
+ // traffic.
+ for _, p := range c.ExceptCIDRs {
+ except, err := netip.ParsePrefix(string(p))
+ if err != nil {
+ return 0, err
+ }
+
+ // Note: this also checks that the allow CIDR prefix and the exception
+ // CIDR prefixes are part of the same address family.
+ if !prefix.Contains(except.Addr()) {
+ return 0, fmt.Errorf("allow CIDR prefix %s does not contain "+
+ "exclude CIDR prefix %s", c.Cidr, p)
+ }
+ }
+
+ return prefixLength, nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/rules.go b/vendor/github.com/cilium/cilium/pkg/policy/api/rules.go
new file mode 100644
index 000000000..aa69904ea
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/api/rules.go
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package api
+
+import (
+ "fmt"
+ "strings"
+)
+
+// Rules is a collection of api.Rule.
+//
+// All rules must be evaluated in order to come to a conclusion. While
+// it is sufficient to have a single fromEndpoints rule match, none of
+// the fromRequires may be violated at the same time.
+// +deepequal-gen:private-method=true
+type Rules []*Rule
+
+func (rs Rules) String() string {
+ strRules := make([]string, 0, len(rs))
+
+ for _, r := range rs {
+ strRules = append(strRules, fmt.Sprintf("%+v", r))
+ }
+
+ return "[" + strings.Join(strRules, ",\n") + "]"
+}
+
+// DeepEqual is a deepequal function, deeply comparing the
+// receiver with other. the receiver must be non-nil.
+func (rs *Rules) DeepEqual(other *Rules) bool {
+ switch {
+ case (rs == nil) != (other == nil):
+ return false
+ case (rs == nil) && (other == nil):
+ return true
+ }
+ return rs.deepEqual(other)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/selector.go b/vendor/github.com/cilium/cilium/pkg/policy/api/selector.go
new file mode 100644
index 000000000..5a5cf29be
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/api/selector.go
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package api
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+
+ k8sLbls "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels"
+ slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
+ validation "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/validation"
+ "github.com/cilium/cilium/pkg/labels"
+ "github.com/cilium/cilium/pkg/logging"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+ "github.com/cilium/cilium/pkg/metrics"
+)
+
+var log = logging.DefaultLogger.WithField(logfields.LogSubsys, "policy-api")
+
+// EndpointSelector is a wrapper for k8s LabelSelector.
+type EndpointSelector struct {
+ *slim_metav1.LabelSelector `json:",inline"`
+
+ // requirements provides a cache for a k8s-friendly format of the
+ // LabelSelector, which allows more efficient matching in Matches().
+ //
+ // Kept as a pointer to allow EndpointSelector to be used as a map key.
+ requirements *k8sLbls.Requirements `json:"-"`
+
+ // cachedLabelSelectorString is the cached representation of the
+ // LabelSelector for this EndpointSelector. It is populated when
+ // EndpointSelectors are created via `NewESFromMatchRequirements`. It is
+ // immutable after its creation.
+ cachedLabelSelectorString string `json:"-"`
+}
+
+// LabelSelectorString returns a user-friendly string representation of
+// EndpointSelector.
+func (n *EndpointSelector) LabelSelectorString() string {
+ if n != nil && n.LabelSelector == nil {
+ return ""
+ }
+ return slim_metav1.FormatLabelSelector(n.LabelSelector)
+}
+
+// String returns a string representation of EndpointSelector.
+func (n EndpointSelector) String() string {
+ j, _ := n.MarshalJSON()
+ return string(j)
+}
+
+// CachedString returns the cached string representation of the LabelSelector
+// for this EndpointSelector.
+func (n EndpointSelector) CachedString() string {
+ return n.cachedLabelSelectorString
+}
+
+// UnmarshalJSON unmarshals the endpoint selector from the byte array.
+func (n *EndpointSelector) UnmarshalJSON(b []byte) error {
+ n.LabelSelector = &slim_metav1.LabelSelector{}
+ err := json.Unmarshal(b, n.LabelSelector)
+ if err != nil {
+ return err
+ }
+ if n.MatchLabels != nil {
+ ml := map[string]string{}
+ for k, v := range n.MatchLabels {
+ ml[labels.GetExtendedKeyFrom(k)] = v
+ }
+ n.MatchLabels = ml
+ }
+ if n.MatchExpressions != nil {
+ newMatchExpr := make([]slim_metav1.LabelSelectorRequirement, len(n.MatchExpressions))
+ for i, v := range n.MatchExpressions {
+ v.Key = labels.GetExtendedKeyFrom(v.Key)
+ newMatchExpr[i] = v
+ }
+ n.MatchExpressions = newMatchExpr
+ }
+ n.requirements = labelSelectorToRequirements(n.LabelSelector)
+ n.cachedLabelSelectorString = n.LabelSelector.String()
+ return nil
+}
+
+// MarshalJSON returns a JSON representation of the byte array.
+func (n EndpointSelector) MarshalJSON() ([]byte, error) {
+ ls := slim_metav1.LabelSelector{}
+
+ if n.LabelSelector == nil {
+ return json.Marshal(ls)
+ }
+
+ if n.MatchLabels != nil {
+ newLabels := map[string]string{}
+ for k, v := range n.MatchLabels {
+ newLabels[labels.GetCiliumKeyFrom(k)] = v
+ }
+ ls.MatchLabels = newLabels
+ }
+ if n.MatchExpressions != nil {
+ newMatchExpr := make([]slim_metav1.LabelSelectorRequirement, len(n.MatchExpressions))
+ for i, v := range n.MatchExpressions {
+ v.Key = labels.GetCiliumKeyFrom(v.Key)
+ newMatchExpr[i] = v
+ }
+ ls.MatchExpressions = newMatchExpr
+ }
+ return json.Marshal(ls)
+}
+
+// HasKeyPrefix checks if the endpoint selector contains the given key prefix in
+// its MatchLabels map and MatchExpressions slice.
+func (n EndpointSelector) HasKeyPrefix(prefix string) bool {
+ for k := range n.MatchLabels {
+ if strings.HasPrefix(k, prefix) {
+ return true
+ }
+ }
+ for _, v := range n.MatchExpressions {
+ if strings.HasPrefix(v.Key, prefix) {
+ return true
+ }
+ }
+ return false
+}
+
+// HasKey checks if the endpoint selector contains the given key in
+// its MatchLabels map or in its MatchExpressions slice.
+func (n EndpointSelector) HasKey(key string) bool {
+ if _, ok := n.MatchLabels[key]; ok {
+ return true
+ }
+ for _, v := range n.MatchExpressions {
+ if v.Key == key {
+ return true
+ }
+ }
+ return false
+}
+
+// GetMatch checks for a match on the specified key, and returns the value that
+// the key must match, and true. If a match cannot be found, returns nil, false.
+func (n EndpointSelector) GetMatch(key string) ([]string, bool) {
+ if value, ok := n.MatchLabels[key]; ok {
+ return []string{value}, true
+ }
+ for _, v := range n.MatchExpressions {
+ if v.Key == key && v.Operator == slim_metav1.LabelSelectorOpIn {
+ return v.Values, true
+ }
+ }
+ return nil, false
+}
+
+// labelSelectorToRequirements turns a kubernetes Selector into a slice of
+// requirements equivalent to the selector. These are cached internally in the
+// EndpointSelector to speed up Matches().
+//
+// This validates the labels, which can be expensive (and may fail..)
+// If there's an error, the selector will be nil and the Matches()
+// implementation will refuse to match any labels.
+func labelSelectorToRequirements(labelSelector *slim_metav1.LabelSelector) *k8sLbls.Requirements {
+ selector, err := slim_metav1.LabelSelectorAsSelector(labelSelector)
+ if err != nil {
+ metrics.PolicyChangeTotal.WithLabelValues(metrics.LabelValueOutcomeFail).Inc()
+ log.WithError(err).WithField(logfields.EndpointLabelSelector,
+ logfields.Repr(labelSelector)).Error("unable to construct selector in label selector")
+ return nil
+ }
+ metrics.PolicyChangeTotal.WithLabelValues(metrics.LabelValueOutcomeSuccess).Inc()
+
+ requirements, selectable := selector.Requirements()
+ if !selectable {
+ return nil
+ }
+ return &requirements
+}
+
+// NewESFromLabels creates a new endpoint selector from the given labels.
+func NewESFromLabels(lbls ...labels.Label) EndpointSelector {
+ ml := map[string]string{}
+ for _, lbl := range lbls {
+ ml[lbl.GetExtendedKey()] = lbl.Value
+ }
+
+ return NewESFromMatchRequirements(ml, nil)
+}
+
+// NewESFromMatchRequirements creates a new endpoint selector from the given
+// match specifications: An optional set of labels that must match, and
+// an optional slice of LabelSelectorRequirements.
+//
+// If the caller intends to reuse 'matchLabels' or 'reqs' after creating the
+// EndpointSelector, they must make a copy of the parameter.
+func NewESFromMatchRequirements(matchLabels map[string]string, reqs []slim_metav1.LabelSelectorRequirement) EndpointSelector {
+ labelSelector := &slim_metav1.LabelSelector{
+ MatchLabels: matchLabels,
+ MatchExpressions: reqs,
+ }
+ return EndpointSelector{
+ LabelSelector: labelSelector,
+ requirements: labelSelectorToRequirements(labelSelector),
+ cachedLabelSelectorString: labelSelector.String(),
+ }
+}
+
+// SyncRequirementsWithLabelSelector ensures that the requirements within the
+// specified EndpointSelector are in sync with the LabelSelector. This is
+// because the LabelSelector has publicly accessible fields, which can be
+// updated without concurrently updating the requirements, so the two fields can
+// become out of sync.
+func (n *EndpointSelector) SyncRequirementsWithLabelSelector() {
+ n.requirements = labelSelectorToRequirements(n.LabelSelector)
+}
+
+// newReservedEndpointSelector returns a selector that matches on all
+// endpoints with the specified reserved label.
+func newReservedEndpointSelector(ID string) EndpointSelector {
+ reservedLabels := labels.NewLabel(ID, "", labels.LabelSourceReserved)
+ return NewESFromLabels(reservedLabels)
+}
+
+var (
+ // WildcardEndpointSelector is a wildcard endpoint selector matching
+ // all endpoints that can be described with labels.
+ WildcardEndpointSelector = NewESFromLabels()
+
+ // ReservedEndpointSelectors map reserved labels to EndpointSelectors
+ // that will match those endpoints.
+ ReservedEndpointSelectors = map[string]EndpointSelector{
+ labels.IDNameHost: newReservedEndpointSelector(labels.IDNameHost),
+ labels.IDNameRemoteNode: newReservedEndpointSelector(labels.IDNameRemoteNode),
+ labels.IDNameWorld: newReservedEndpointSelector(labels.IDNameWorld),
+ labels.IDNameWorldIPv4: newReservedEndpointSelector(labels.IDNameWorldIPv4),
+ labels.IDNameWorldIPv6: newReservedEndpointSelector(labels.IDNameWorldIPv6),
+ }
+)
+
+// NewESFromK8sLabelSelector returns a new endpoint selector from the label
+// selectors, where the given srcPrefix will be encoded in the label keys.
+func NewESFromK8sLabelSelector(srcPrefix string, lss ...*slim_metav1.LabelSelector) EndpointSelector {
+ var (
+ matchLabels map[string]string
+ matchExpressions []slim_metav1.LabelSelectorRequirement
+ )
+ for _, ls := range lss {
+ if ls == nil {
+ continue
+ }
+ if ls.MatchLabels != nil {
+ if matchLabels == nil {
+ matchLabels = map[string]string{}
+ }
+ for k, v := range ls.MatchLabels {
+ matchLabels[srcPrefix+k] = v
+ }
+ }
+ if ls.MatchExpressions != nil {
+ if matchExpressions == nil {
+ matchExpressions = make([]slim_metav1.LabelSelectorRequirement, 0, len(ls.MatchExpressions))
+ }
+ for _, v := range ls.MatchExpressions {
+ v.Key = srcPrefix + v.Key
+ matchExpressions = append(matchExpressions, v)
+ }
+ }
+ }
+ return NewESFromMatchRequirements(matchLabels, matchExpressions)
+}
+
+// AddMatch adds a match for 'key' == 'value' to the endpoint selector.
+func (n *EndpointSelector) AddMatch(key, value string) {
+ if n.MatchLabels == nil {
+ n.MatchLabels = map[string]string{}
+ }
+ n.MatchLabels[key] = value
+ n.requirements = labelSelectorToRequirements(n.LabelSelector)
+ n.cachedLabelSelectorString = n.LabelSelector.String()
+}
+
+// AddMatchExpression adds a match expression to label selector of the endpoint selector.
+func (n *EndpointSelector) AddMatchExpression(key string, op slim_metav1.LabelSelectorOperator, values []string) {
+ n.MatchExpressions = append(n.MatchExpressions, slim_metav1.LabelSelectorRequirement{
+ Key: key,
+ Operator: op,
+ Values: values,
+ })
+
+ // Update the cache of the EndpointSelector from the embedded label selector.
+ // This is to make sure we have updated caches containing the required selectors.
+ n.requirements = labelSelectorToRequirements(n.LabelSelector)
+ n.cachedLabelSelectorString = n.LabelSelector.String()
+}
+
+// Matches returns true if the endpoint selector Matches the `lblsToMatch`.
+// Returns always true if the endpoint selector contains the reserved label for
+// "all".
+func (n *EndpointSelector) Matches(lblsToMatch k8sLbls.Labels) bool {
+ // Try to update cached requirements for this EndpointSelector if possible.
+ if n.requirements == nil {
+ n.requirements = labelSelectorToRequirements(n.LabelSelector)
+ // Nil indicates that requirements failed validation in some way,
+ // so we cannot parse the labels for matching purposes; thus, we cannot
+ // match if labels cannot be parsed, so return false.
+ if n.requirements == nil {
+ return false
+ }
+ }
+ for _, req := range *n.requirements {
+ if !req.Matches(lblsToMatch) {
+ return false
+ }
+ }
+ return true
+}
+
+// IsWildcard returns true if the endpoint selector selects all endpoints.
+func (n *EndpointSelector) IsWildcard() bool {
+ return n.LabelSelector != nil &&
+ len(n.LabelSelector.MatchLabels)+len(n.LabelSelector.MatchExpressions) == 0
+}
+
+// ConvertToLabelSelectorRequirementSlice converts the MatchLabels and
+// MatchExpressions within the specified EndpointSelector into a list of
+// LabelSelectorRequirements.
+func (n *EndpointSelector) ConvertToLabelSelectorRequirementSlice() []slim_metav1.LabelSelectorRequirement {
+ requirements := make([]slim_metav1.LabelSelectorRequirement, 0, len(n.MatchExpressions)+len(n.MatchLabels))
+ // Append already existing match expressions.
+ requirements = append(requirements, n.MatchExpressions...)
+ // Convert each MatchLabels entry to a LabelSelectorRequirement.
+ for key, value := range n.MatchLabels {
+ requirementFromMatchLabels := slim_metav1.LabelSelectorRequirement{
+ Key: key,
+ Operator: slim_metav1.LabelSelectorOpIn,
+ Values: []string{value},
+ }
+ requirements = append(requirements, requirementFromMatchLabels)
+ }
+ return requirements
+}
+
+// sanitize returns an error if the EndpointSelector's LabelSelector is invalid.
+func (n *EndpointSelector) sanitize() error {
+ errList := validation.ValidateLabelSelector(n.LabelSelector, validation.LabelSelectorValidationOptions{AllowInvalidLabelValueInSelector: false}, nil)
+ if len(errList) > 0 {
+ return fmt.Errorf("invalid label selector: %s", errList.ToAggregate().Error())
+ }
+ return nil
+}
+
+// EndpointSelectorSlice is a slice of EndpointSelectors that can be sorted.
+type EndpointSelectorSlice []EndpointSelector
+
+func (s EndpointSelectorSlice) Len() int { return len(s) }
+func (s EndpointSelectorSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+func (s EndpointSelectorSlice) Less(i, j int) bool {
+ strI := s[i].LabelSelectorString()
+ strJ := s[j].LabelSelectorString()
+
+ return strings.Compare(strI, strJ) < 0
+}
+
+// Matches returns true if any of the EndpointSelectors in the slice match the
+// provided labels
+func (s EndpointSelectorSlice) Matches(ctx labels.LabelArray) bool {
+ for _, selector := range s {
+ if selector.Matches(ctx) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// SelectsAllEndpoints returns whether the EndpointSelectorSlice selects all
+// endpoints, which is true if the wildcard endpoint selector is present in the
+// slice.
+func (s EndpointSelectorSlice) SelectsAllEndpoints() bool {
+ for _, selector := range s {
+ if selector.IsWildcard() {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/service.go b/vendor/github.com/cilium/cilium/pkg/policy/api/service.go
new file mode 100644
index 000000000..57868a367
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/api/service.go
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package api
+
+// ServiceSelector is a label selector for k8s services
+type ServiceSelector EndpointSelector
+
+// Service wraps around selectors for services
+type Service struct {
+ // K8sServiceSelector selects services by k8s labels and namespace
+ K8sServiceSelector *K8sServiceSelectorNamespace `json:"k8sServiceSelector,omitempty"`
+ // K8sService selects service by name and namespace pair
+ K8sService *K8sServiceNamespace `json:"k8sService,omitempty"`
+}
+
+// K8sServiceNamespace is an abstraction for the k8s service + namespace types.
+type K8sServiceNamespace struct {
+ ServiceName string `json:"serviceName,omitempty"`
+ Namespace string `json:"namespace,omitempty"`
+}
+
+// K8sServiceSelectorNamespace wraps service selector with namespace
+type K8sServiceSelectorNamespace struct {
+ // +kubebuilder:validation:Required
+ Selector ServiceSelector `json:"selector"`
+ Namespace string `json:"namespace,omitempty"`
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/utils.go b/vendor/github.com/cilium/cilium/pkg/policy/api/utils.go
new file mode 100644
index 000000000..7424d3400
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/api/utils.go
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package api
+
+import (
+ "fmt"
+ "strings"
+)
+
+// Exists returns true if the HTTP rule already exists in the list of rules
+func (h *PortRuleHTTP) Exists(rules L7Rules) bool {
+ for _, existingRule := range rules.HTTP {
+ if h.Equal(existingRule) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// Equal returns true if both HTTP rules are equal
+func (h *PortRuleHTTP) Equal(o PortRuleHTTP) bool {
+ if h.Path != o.Path ||
+ h.Method != o.Method ||
+ h.Host != o.Host ||
+ len(h.Headers) != len(o.Headers) ||
+ len(h.HeaderMatches) != len(o.HeaderMatches) {
+ return false
+ }
+
+ for i, value := range h.Headers {
+ if o.Headers[i] != value {
+ return false
+ }
+ }
+
+ for i, value := range h.HeaderMatches {
+ if !o.HeaderMatches[i].Equal(value) {
+ return false
+ }
+ }
+ return true
+}
+
+// Equal returns true if both Secrets are equal
+func (a *Secret) Equal(b *Secret) bool {
+ return a == nil && b == nil || a != nil && b != nil && *a == *b
+}
+
+// Equal returns true if both HeaderMatches are equal
+func (h *HeaderMatch) Equal(o *HeaderMatch) bool {
+ if h.Mismatch != o.Mismatch ||
+ h.Name != o.Name ||
+ h.Value != o.Value ||
+ !h.Secret.Equal(o.Secret) {
+ return false
+ }
+ return true
+}
+
+// Exists returns true if the DNS rule already exists in the list of rules
+func (d *PortRuleDNS) Exists(rules L7Rules) bool {
+ for _, existingRule := range rules.DNS {
+ if d.Equal(existingRule) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// Exists returns true if the L7 rule already exists in the list of rules
+func (h *PortRuleL7) Exists(rules L7Rules) bool {
+ for _, existingRule := range rules.L7 {
+ if h.Equal(existingRule) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// Equal returns true if both rules are equal
+func (d *PortRuleDNS) Equal(o PortRuleDNS) bool {
+ return d != nil && d.MatchName == o.MatchName && d.MatchPattern == o.MatchPattern
+}
+
+// Equal returns true if both L7 rules are equal
+func (h *PortRuleL7) Equal(o PortRuleL7) bool {
+ if len(*h) != len(o) {
+ return false
+ }
+ for k, v := range *h {
+ if v2, ok := o[k]; !ok || v2 != v {
+ return false
+ }
+ }
+ return true
+}
+
+// Validate returns an error if the layer 4 protocol is not valid
+func (l4 L4Proto) Validate() error {
+ switch l4 {
+ case ProtoAny, ProtoTCP, ProtoUDP, ProtoSCTP:
+ default:
+ return fmt.Errorf("invalid protocol %q, must be { tcp | udp | sctp | any }", l4)
+ }
+
+ return nil
+}
+
+// ParseL4Proto parses a string as layer 4 protocol
+func ParseL4Proto(proto string) (L4Proto, error) {
+ if proto == "" {
+ return ProtoAny, nil
+ }
+
+ p := L4Proto(strings.ToUpper(proto))
+ return p, p.Validate()
+}
+
+// ResourceQualifiedName returns the qualified name of an Envoy resource,
+// prepending CEC namespace and CEC name to the resource name and using
+// '/' as a separator.
+//
+// If resourceName already has a slash, it must be of the form 'namespace/name', where namespace
+// usually is equal to 'namespace'. This also applies for clusterwide resources for which
+// 'namespace' is empty.
+//
+// If 'resourceName' has no slash, it will be prepended with 'namespace/cecName' so that the
+// full name passed to Envoy is 'namespace/cecName/resourceName'. This makes non-qualified resource
+// names and resource name references local to the given namespace and CiliumEnvoyConfig CRD.
+//
+// if 'forceNamespace' is 'true' then resourceName is always prepended with "namespace/cecName/",
+// even if it already has slashes, unless the first component of the name is equal to
+// 'namespace'.
+//
+// As a special case pass through an empty resourceName without qualification so that unnamed
+// resources do not become named. This is important to not transform an invalid Envoy configuration
+// to a valid one with a fake name.
+
+type Option int
+
+const (
+ ForceNamespace Option = iota
+)
+
+func ResourceQualifiedName(namespace, cecName, resourceName string, options ...Option) string {
+ forceNamespace := false
+ for _, option := range options {
+ switch option {
+ case ForceNamespace:
+ forceNamespace = true
+ }
+ }
+
+ idx := strings.IndexRune(resourceName, '/')
+ if resourceName == "" || idx >= 0 && (!forceNamespace || (idx == len(namespace) && strings.HasPrefix(resourceName, namespace))) {
+ return resourceName
+ }
+
+ var sb strings.Builder
+
+ sb.WriteString(namespace)
+ sb.WriteRune('/')
+ sb.WriteString(cecName)
+ sb.WriteRune('/')
+ sb.WriteString(resourceName)
+
+ return sb.String()
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/policy/api/zz_generated.deepcopy.go
new file mode 100644
index 000000000..7d1b0b70b
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/api/zz_generated.deepcopy.go
@@ -0,0 +1,1080 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package api
+
+import (
+ labels "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels"
+ v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
+ kafka "github.com/cilium/proxy/pkg/policy/api/kafka"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSGroup) DeepCopyInto(out *AWSGroup) {
+ *out = *in
+ if in.Labels != nil {
+ in, out := &in.Labels, &out.Labels
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.SecurityGroupsIds != nil {
+ in, out := &in.SecurityGroupsIds, &out.SecurityGroupsIds
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.SecurityGroupsNames != nil {
+ in, out := &in.SecurityGroupsNames, &out.SecurityGroupsNames
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSGroup.
+func (in *AWSGroup) DeepCopy() *AWSGroup {
+ if in == nil {
+ return nil
+ }
+ out := new(AWSGroup)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Authentication) DeepCopyInto(out *Authentication) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Authentication.
+func (in *Authentication) DeepCopy() *Authentication {
+ if in == nil {
+ return nil
+ }
+ out := new(Authentication)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CIDRRule) DeepCopyInto(out *CIDRRule) {
+ *out = *in
+ if in.ExceptCIDRs != nil {
+ in, out := &in.ExceptCIDRs, &out.ExceptCIDRs
+ *out = make([]CIDR, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CIDRRule.
+func (in *CIDRRule) DeepCopy() *CIDRRule {
+ if in == nil {
+ return nil
+ }
+ out := new(CIDRRule)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in CIDRRuleSlice) DeepCopyInto(out *CIDRRuleSlice) {
+ {
+ in := &in
+ *out = make(CIDRRuleSlice, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CIDRRuleSlice.
+func (in CIDRRuleSlice) DeepCopy() CIDRRuleSlice {
+ if in == nil {
+ return nil
+ }
+ out := new(CIDRRuleSlice)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in CIDRSlice) DeepCopyInto(out *CIDRSlice) {
+ {
+ in := &in
+ *out = make(CIDRSlice, len(*in))
+ copy(*out, *in)
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CIDRSlice.
+func (in CIDRSlice) DeepCopy() CIDRSlice {
+ if in == nil {
+ return nil
+ }
+ out := new(CIDRSlice)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EgressCommonRule) DeepCopyInto(out *EgressCommonRule) {
+ *out = *in
+ if in.ToEndpoints != nil {
+ in, out := &in.ToEndpoints, &out.ToEndpoints
+ *out = make([]EndpointSelector, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ToRequires != nil {
+ in, out := &in.ToRequires, &out.ToRequires
+ *out = make([]EndpointSelector, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ToCIDR != nil {
+ in, out := &in.ToCIDR, &out.ToCIDR
+ *out = make(CIDRSlice, len(*in))
+ copy(*out, *in)
+ }
+ if in.ToCIDRSet != nil {
+ in, out := &in.ToCIDRSet, &out.ToCIDRSet
+ *out = make(CIDRRuleSlice, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ToEntities != nil {
+ in, out := &in.ToEntities, &out.ToEntities
+ *out = make(EntitySlice, len(*in))
+ copy(*out, *in)
+ }
+ if in.ToServices != nil {
+ in, out := &in.ToServices, &out.ToServices
+ *out = make([]Service, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ToGroups != nil {
+ in, out := &in.ToGroups, &out.ToGroups
+ *out = make([]ToGroups, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.aggregatedSelectors != nil {
+ in, out := &in.aggregatedSelectors, &out.aggregatedSelectors
+ *out = make(EndpointSelectorSlice, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressCommonRule.
+func (in *EgressCommonRule) DeepCopy() *EgressCommonRule {
+ if in == nil {
+ return nil
+ }
+ out := new(EgressCommonRule)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EgressDenyRule) DeepCopyInto(out *EgressDenyRule) {
+ *out = *in
+ in.EgressCommonRule.DeepCopyInto(&out.EgressCommonRule)
+ if in.ToPorts != nil {
+ in, out := &in.ToPorts, &out.ToPorts
+ *out = make(PortDenyRules, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ICMPs != nil {
+ in, out := &in.ICMPs, &out.ICMPs
+ *out = make(ICMPRules, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressDenyRule.
+func (in *EgressDenyRule) DeepCopy() *EgressDenyRule {
+ if in == nil {
+ return nil
+ }
+ out := new(EgressDenyRule)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EgressRule) DeepCopyInto(out *EgressRule) {
+ *out = *in
+ in.EgressCommonRule.DeepCopyInto(&out.EgressCommonRule)
+ if in.ToPorts != nil {
+ in, out := &in.ToPorts, &out.ToPorts
+ *out = make(PortRules, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ToFQDNs != nil {
+ in, out := &in.ToFQDNs, &out.ToFQDNs
+ *out = make(FQDNSelectorSlice, len(*in))
+ copy(*out, *in)
+ }
+ if in.ICMPs != nil {
+ in, out := &in.ICMPs, &out.ICMPs
+ *out = make(ICMPRules, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Authentication != nil {
+ in, out := &in.Authentication, &out.Authentication
+ *out = new(Authentication)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressRule.
+func (in *EgressRule) DeepCopy() *EgressRule {
+ if in == nil {
+ return nil
+ }
+ out := new(EgressRule)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EndpointSelector) DeepCopyInto(out *EndpointSelector) {
+ *out = *in
+ if in.LabelSelector != nil {
+ in, out := &in.LabelSelector, &out.LabelSelector
+ *out = new(v1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.requirements != nil {
+ in, out := &in.requirements, &out.requirements
+ *out = new(labels.Requirements)
+ if **in != nil {
+ in, out := *in, *out
+ *out = make([]labels.Requirement, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSelector.
+func (in *EndpointSelector) DeepCopy() *EndpointSelector {
+ if in == nil {
+ return nil
+ }
+ out := new(EndpointSelector)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in EndpointSelectorSlice) DeepCopyInto(out *EndpointSelectorSlice) {
+ {
+ in := &in
+ *out = make(EndpointSelectorSlice, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSelectorSlice.
+func (in EndpointSelectorSlice) DeepCopy() EndpointSelectorSlice {
+ if in == nil {
+ return nil
+ }
+ out := new(EndpointSelectorSlice)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in EntitySlice) DeepCopyInto(out *EntitySlice) {
+ {
+ in := &in
+ *out = make(EntitySlice, len(*in))
+ copy(*out, *in)
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EntitySlice.
+func (in EntitySlice) DeepCopy() EntitySlice {
+ if in == nil {
+ return nil
+ }
+ out := new(EntitySlice)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EnvoyConfig) DeepCopyInto(out *EnvoyConfig) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvoyConfig.
+func (in *EnvoyConfig) DeepCopy() *EnvoyConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(EnvoyConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FQDNSelector) DeepCopyInto(out *FQDNSelector) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FQDNSelector.
+func (in *FQDNSelector) DeepCopy() *FQDNSelector {
+ if in == nil {
+ return nil
+ }
+ out := new(FQDNSelector)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in FQDNSelectorSlice) DeepCopyInto(out *FQDNSelectorSlice) {
+ {
+ in := &in
+ *out = make(FQDNSelectorSlice, len(*in))
+ copy(*out, *in)
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FQDNSelectorSlice.
+func (in FQDNSelectorSlice) DeepCopy() FQDNSelectorSlice {
+ if in == nil {
+ return nil
+ }
+ out := new(FQDNSelectorSlice)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HeaderMatch) DeepCopyInto(out *HeaderMatch) {
+ *out = *in
+ if in.Secret != nil {
+ in, out := &in.Secret, &out.Secret
+ *out = new(Secret)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HeaderMatch.
+func (in *HeaderMatch) DeepCopy() *HeaderMatch {
+ if in == nil {
+ return nil
+ }
+ out := new(HeaderMatch)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ICMPField) DeepCopyInto(out *ICMPField) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ICMPField.
+func (in *ICMPField) DeepCopy() *ICMPField {
+ if in == nil {
+ return nil
+ }
+ out := new(ICMPField)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ICMPRule) DeepCopyInto(out *ICMPRule) {
+ *out = *in
+ if in.Fields != nil {
+ in, out := &in.Fields, &out.Fields
+ *out = make([]ICMPField, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ICMPRule.
+func (in *ICMPRule) DeepCopy() *ICMPRule {
+ if in == nil {
+ return nil
+ }
+ out := new(ICMPRule)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in ICMPRules) DeepCopyInto(out *ICMPRules) {
+ {
+ in := &in
+ *out = make(ICMPRules, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ICMPRules.
+func (in ICMPRules) DeepCopy() ICMPRules {
+ if in == nil {
+ return nil
+ }
+ out := new(ICMPRules)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressCommonRule) DeepCopyInto(out *IngressCommonRule) {
+ *out = *in
+ if in.FromEndpoints != nil {
+ in, out := &in.FromEndpoints, &out.FromEndpoints
+ *out = make([]EndpointSelector, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.FromRequires != nil {
+ in, out := &in.FromRequires, &out.FromRequires
+ *out = make([]EndpointSelector, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.FromCIDR != nil {
+ in, out := &in.FromCIDR, &out.FromCIDR
+ *out = make(CIDRSlice, len(*in))
+ copy(*out, *in)
+ }
+ if in.FromCIDRSet != nil {
+ in, out := &in.FromCIDRSet, &out.FromCIDRSet
+ *out = make(CIDRRuleSlice, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.FromEntities != nil {
+ in, out := &in.FromEntities, &out.FromEntities
+ *out = make(EntitySlice, len(*in))
+ copy(*out, *in)
+ }
+ if in.aggregatedSelectors != nil {
+ in, out := &in.aggregatedSelectors, &out.aggregatedSelectors
+ *out = make(EndpointSelectorSlice, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressCommonRule.
+func (in *IngressCommonRule) DeepCopy() *IngressCommonRule {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressCommonRule)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressDenyRule) DeepCopyInto(out *IngressDenyRule) {
+ *out = *in
+ in.IngressCommonRule.DeepCopyInto(&out.IngressCommonRule)
+ if in.ToPorts != nil {
+ in, out := &in.ToPorts, &out.ToPorts
+ *out = make(PortDenyRules, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ICMPs != nil {
+ in, out := &in.ICMPs, &out.ICMPs
+ *out = make(ICMPRules, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressDenyRule.
+func (in *IngressDenyRule) DeepCopy() *IngressDenyRule {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressDenyRule)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressRule) DeepCopyInto(out *IngressRule) {
+ *out = *in
+ in.IngressCommonRule.DeepCopyInto(&out.IngressCommonRule)
+ if in.ToPorts != nil {
+ in, out := &in.ToPorts, &out.ToPorts
+ *out = make(PortRules, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ICMPs != nil {
+ in, out := &in.ICMPs, &out.ICMPs
+ *out = make(ICMPRules, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Authentication != nil {
+ in, out := &in.Authentication, &out.Authentication
+ *out = new(Authentication)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRule.
+func (in *IngressRule) DeepCopy() *IngressRule {
+ if in == nil {
+ return nil
+ }
+ out := new(IngressRule)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *K8sServiceNamespace) DeepCopyInto(out *K8sServiceNamespace) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sServiceNamespace.
+func (in *K8sServiceNamespace) DeepCopy() *K8sServiceNamespace {
+ if in == nil {
+ return nil
+ }
+ out := new(K8sServiceNamespace)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *K8sServiceSelectorNamespace) DeepCopyInto(out *K8sServiceSelectorNamespace) {
+ *out = *in
+ in.Selector.DeepCopyInto(&out.Selector)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sServiceSelectorNamespace.
+func (in *K8sServiceSelectorNamespace) DeepCopy() *K8sServiceSelectorNamespace {
+ if in == nil {
+ return nil
+ }
+ out := new(K8sServiceSelectorNamespace)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *L7Rules) DeepCopyInto(out *L7Rules) {
+ *out = *in
+ if in.HTTP != nil {
+ in, out := &in.HTTP, &out.HTTP
+ *out = make([]PortRuleHTTP, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Kafka != nil {
+ in, out := &in.Kafka, &out.Kafka
+ *out = make([]kafka.PortRule, len(*in))
+ copy(*out, *in)
+ }
+ if in.DNS != nil {
+ in, out := &in.DNS, &out.DNS
+ *out = make([]PortRuleDNS, len(*in))
+ copy(*out, *in)
+ }
+ if in.L7 != nil {
+ in, out := &in.L7, &out.L7
+ *out = make([]PortRuleL7, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = make(PortRuleL7, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new L7Rules.
+func (in *L7Rules) DeepCopy() *L7Rules {
+ if in == nil {
+ return nil
+ }
+ out := new(L7Rules)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Listener) DeepCopyInto(out *Listener) {
+ *out = *in
+ if in.EnvoyConfig != nil {
+ in, out := &in.EnvoyConfig, &out.EnvoyConfig
+ *out = new(EnvoyConfig)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Listener.
+func (in *Listener) DeepCopy() *Listener {
+ if in == nil {
+ return nil
+ }
+ out := new(Listener)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PortDenyRule) DeepCopyInto(out *PortDenyRule) {
+ *out = *in
+ if in.Ports != nil {
+ in, out := &in.Ports, &out.Ports
+ *out = make([]PortProtocol, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortDenyRule.
+func (in *PortDenyRule) DeepCopy() *PortDenyRule {
+ if in == nil {
+ return nil
+ }
+ out := new(PortDenyRule)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in PortDenyRules) DeepCopyInto(out *PortDenyRules) {
+ {
+ in := &in
+ *out = make(PortDenyRules, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortDenyRules.
+func (in PortDenyRules) DeepCopy() PortDenyRules {
+ if in == nil {
+ return nil
+ }
+ out := new(PortDenyRules)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PortProtocol) DeepCopyInto(out *PortProtocol) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortProtocol.
+func (in *PortProtocol) DeepCopy() *PortProtocol {
+ if in == nil {
+ return nil
+ }
+ out := new(PortProtocol)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PortRule) DeepCopyInto(out *PortRule) {
+ *out = *in
+ if in.Ports != nil {
+ in, out := &in.Ports, &out.Ports
+ *out = make([]PortProtocol, len(*in))
+ copy(*out, *in)
+ }
+ if in.TerminatingTLS != nil {
+ in, out := &in.TerminatingTLS, &out.TerminatingTLS
+ *out = new(TLSContext)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.OriginatingTLS != nil {
+ in, out := &in.OriginatingTLS, &out.OriginatingTLS
+ *out = new(TLSContext)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ServerNames != nil {
+ in, out := &in.ServerNames, &out.ServerNames
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Listener != nil {
+ in, out := &in.Listener, &out.Listener
+ *out = new(Listener)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Rules != nil {
+ in, out := &in.Rules, &out.Rules
+ *out = new(L7Rules)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortRule.
+func (in *PortRule) DeepCopy() *PortRule {
+ if in == nil {
+ return nil
+ }
+ out := new(PortRule)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PortRuleDNS) DeepCopyInto(out *PortRuleDNS) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortRuleDNS.
+func (in *PortRuleDNS) DeepCopy() *PortRuleDNS {
+ if in == nil {
+ return nil
+ }
+ out := new(PortRuleDNS)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PortRuleHTTP) DeepCopyInto(out *PortRuleHTTP) {
+ *out = *in
+ if in.Headers != nil {
+ in, out := &in.Headers, &out.Headers
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.HeaderMatches != nil {
+ in, out := &in.HeaderMatches, &out.HeaderMatches
+ *out = make([]*HeaderMatch, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(HeaderMatch)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortRuleHTTP.
+func (in *PortRuleHTTP) DeepCopy() *PortRuleHTTP {
+ if in == nil {
+ return nil
+ }
+ out := new(PortRuleHTTP)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in PortRuleL7) DeepCopyInto(out *PortRuleL7) {
+ {
+ in := &in
+ *out = make(PortRuleL7, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortRuleL7.
+func (in PortRuleL7) DeepCopy() PortRuleL7 {
+ if in == nil {
+ return nil
+ }
+ out := new(PortRuleL7)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in PortRules) DeepCopyInto(out *PortRules) {
+ {
+ in := &in
+ *out = make(PortRules, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortRules.
+func (in PortRules) DeepCopy() PortRules {
+ if in == nil {
+ return nil
+ }
+ out := new(PortRules)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Rule) DeepCopyInto(out *Rule) {
+ *out = *in
+ in.EndpointSelector.DeepCopyInto(&out.EndpointSelector)
+ in.NodeSelector.DeepCopyInto(&out.NodeSelector)
+ if in.Ingress != nil {
+ in, out := &in.Ingress, &out.Ingress
+ *out = make([]IngressRule, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.IngressDeny != nil {
+ in, out := &in.IngressDeny, &out.IngressDeny
+ *out = make([]IngressDenyRule, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Egress != nil {
+ in, out := &in.Egress, &out.Egress
+ *out = make([]EgressRule, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.EgressDeny != nil {
+ in, out := &in.EgressDeny, &out.EgressDeny
+ *out = make([]EgressDenyRule, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ out.Labels = in.Labels.DeepCopy()
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule.
+func (in *Rule) DeepCopy() *Rule {
+ if in == nil {
+ return nil
+ }
+ out := new(Rule)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in Rules) DeepCopyInto(out *Rules) {
+ {
+ in := &in
+ *out = make(Rules, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(Rule)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rules.
+func (in Rules) DeepCopy() Rules {
+ if in == nil {
+ return nil
+ }
+ out := new(Rules)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Secret) DeepCopyInto(out *Secret) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Secret.
+func (in *Secret) DeepCopy() *Secret {
+ if in == nil {
+ return nil
+ }
+ out := new(Secret)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Service) DeepCopyInto(out *Service) {
+ *out = *in
+ if in.K8sServiceSelector != nil {
+ in, out := &in.K8sServiceSelector, &out.K8sServiceSelector
+ *out = new(K8sServiceSelectorNamespace)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.K8sService != nil {
+ in, out := &in.K8sService, &out.K8sService
+ *out = new(K8sServiceNamespace)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service.
+func (in *Service) DeepCopy() *Service {
+ if in == nil {
+ return nil
+ }
+ out := new(Service)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ServiceSelector) DeepCopyInto(out *ServiceSelector) {
+ *out = *in
+ if in.LabelSelector != nil {
+ in, out := &in.LabelSelector, &out.LabelSelector
+ *out = new(v1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.requirements != nil {
+ in, out := &in.requirements, &out.requirements
+ *out = new(labels.Requirements)
+ if **in != nil {
+ in, out := *in, *out
+ *out = make([]labels.Requirement, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSelector.
+func (in *ServiceSelector) DeepCopy() *ServiceSelector {
+ if in == nil {
+ return nil
+ }
+ out := new(ServiceSelector)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSContext) DeepCopyInto(out *TLSContext) {
+ *out = *in
+ if in.Secret != nil {
+ in, out := &in.Secret, &out.Secret
+ *out = new(Secret)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSContext.
+func (in *TLSContext) DeepCopy() *TLSContext {
+ if in == nil {
+ return nil
+ }
+ out := new(TLSContext)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ToGroups) DeepCopyInto(out *ToGroups) {
+ *out = *in
+ if in.AWS != nil {
+ in, out := &in.AWS, &out.AWS
+ *out = new(AWSGroup)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ToGroups.
+func (in *ToGroups) DeepCopy() *ToGroups {
+ if in == nil {
+ return nil
+ }
+ out := new(ToGroups)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/policy/api/zz_generated.deepequal.go
new file mode 100644
index 000000000..a6573025d
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/api/zz_generated.deepequal.go
@@ -0,0 +1,1326 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Code generated by deepequal-gen. DO NOT EDIT.
+
+package api
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *AWSGroup) DeepEqual(other *AWSGroup) bool {
+ if other == nil {
+ return false
+ }
+
+ if ((in.Labels != nil) && (other.Labels != nil)) || ((in.Labels == nil) != (other.Labels == nil)) {
+ in, other := &in.Labels, &other.Labels
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for key, inValue := range *in {
+ if otherValue, present := (*other)[key]; !present {
+ return false
+ } else {
+ if inValue != otherValue {
+ return false
+ }
+ }
+ }
+ }
+ }
+
+ if ((in.SecurityGroupsIds != nil) && (other.SecurityGroupsIds != nil)) || ((in.SecurityGroupsIds == nil) != (other.SecurityGroupsIds == nil)) {
+ in, other := &in.SecurityGroupsIds, &other.SecurityGroupsIds
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if inElement != (*other)[i] {
+ return false
+ }
+ }
+ }
+ }
+
+ if ((in.SecurityGroupsNames != nil) && (other.SecurityGroupsNames != nil)) || ((in.SecurityGroupsNames == nil) != (other.SecurityGroupsNames == nil)) {
+ in, other := &in.SecurityGroupsNames, &other.SecurityGroupsNames
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if inElement != (*other)[i] {
+ return false
+ }
+ }
+ }
+ }
+
+ if in.Region != other.Region {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *Authentication) DeepEqual(other *Authentication) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Mode != other.Mode {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CIDRRule) DeepEqual(other *CIDRRule) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Cidr != other.Cidr {
+ return false
+ }
+ if in.CIDRGroupRef != other.CIDRGroupRef {
+ return false
+ }
+ if ((in.ExceptCIDRs != nil) && (other.ExceptCIDRs != nil)) || ((in.ExceptCIDRs == nil) != (other.ExceptCIDRs == nil)) {
+ in, other := &in.ExceptCIDRs, &other.ExceptCIDRs
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if inElement != (*other)[i] {
+ return false
+ }
+ }
+ }
+ }
+
+ if in.Generated != other.Generated {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CIDRRuleSlice) DeepEqual(other *CIDRRuleSlice) bool {
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *CIDRSlice) DeepEqual(other *CIDRSlice) bool {
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if inElement != (*other)[i] {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *EgressCommonRule) DeepEqual(other *EgressCommonRule) bool {
+ if other == nil {
+ return false
+ }
+
+ if ((in.ToEndpoints != nil) && (other.ToEndpoints != nil)) || ((in.ToEndpoints == nil) != (other.ToEndpoints == nil)) {
+ in, other := &in.ToEndpoints, &other.ToEndpoints
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ if ((in.ToRequires != nil) && (other.ToRequires != nil)) || ((in.ToRequires == nil) != (other.ToRequires == nil)) {
+ in, other := &in.ToRequires, &other.ToRequires
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ if ((in.ToCIDR != nil) && (other.ToCIDR != nil)) || ((in.ToCIDR == nil) != (other.ToCIDR == nil)) {
+ in, other := &in.ToCIDR, &other.ToCIDR
+ if other == nil || !in.DeepEqual(other) {
+ return false
+ }
+ }
+
+ if ((in.ToCIDRSet != nil) && (other.ToCIDRSet != nil)) || ((in.ToCIDRSet == nil) != (other.ToCIDRSet == nil)) {
+ in, other := &in.ToCIDRSet, &other.ToCIDRSet
+ if other == nil || !in.DeepEqual(other) {
+ return false
+ }
+ }
+
+ if ((in.ToEntities != nil) && (other.ToEntities != nil)) || ((in.ToEntities == nil) != (other.ToEntities == nil)) {
+ in, other := &in.ToEntities, &other.ToEntities
+ if other == nil || !in.DeepEqual(other) {
+ return false
+ }
+ }
+
+ if ((in.ToServices != nil) && (other.ToServices != nil)) || ((in.ToServices == nil) != (other.ToServices == nil)) {
+ in, other := &in.ToServices, &other.ToServices
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ if ((in.ToGroups != nil) && (other.ToGroups != nil)) || ((in.ToGroups == nil) != (other.ToGroups == nil)) {
+ in, other := &in.ToGroups, &other.ToGroups
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ if ((in.aggregatedSelectors != nil) && (other.aggregatedSelectors != nil)) || ((in.aggregatedSelectors == nil) != (other.aggregatedSelectors == nil)) {
+ in, other := &in.aggregatedSelectors, &other.aggregatedSelectors
+ if other == nil || !in.DeepEqual(other) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *EgressDenyRule) DeepEqual(other *EgressDenyRule) bool {
+ if other == nil {
+ return false
+ }
+
+ if !in.EgressCommonRule.DeepEqual(&other.EgressCommonRule) {
+ return false
+ }
+
+ if ((in.ToPorts != nil) && (other.ToPorts != nil)) || ((in.ToPorts == nil) != (other.ToPorts == nil)) {
+ in, other := &in.ToPorts, &other.ToPorts
+ if other == nil || !in.DeepEqual(other) {
+ return false
+ }
+ }
+
+ if ((in.ICMPs != nil) && (other.ICMPs != nil)) || ((in.ICMPs == nil) != (other.ICMPs == nil)) {
+ in, other := &in.ICMPs, &other.ICMPs
+ if other == nil || !in.DeepEqual(other) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *EgressRule) DeepEqual(other *EgressRule) bool {
+ if other == nil {
+ return false
+ }
+
+ if !in.EgressCommonRule.DeepEqual(&other.EgressCommonRule) {
+ return false
+ }
+
+ if ((in.ToPorts != nil) && (other.ToPorts != nil)) || ((in.ToPorts == nil) != (other.ToPorts == nil)) {
+ in, other := &in.ToPorts, &other.ToPorts
+ if other == nil || !in.DeepEqual(other) {
+ return false
+ }
+ }
+
+ if ((in.ToFQDNs != nil) && (other.ToFQDNs != nil)) || ((in.ToFQDNs == nil) != (other.ToFQDNs == nil)) {
+ in, other := &in.ToFQDNs, &other.ToFQDNs
+ if other == nil || !in.DeepEqual(other) {
+ return false
+ }
+ }
+
+ if ((in.ICMPs != nil) && (other.ICMPs != nil)) || ((in.ICMPs == nil) != (other.ICMPs == nil)) {
+ in, other := &in.ICMPs, &other.ICMPs
+ if other == nil || !in.DeepEqual(other) {
+ return false
+ }
+ }
+
+ if (in.Authentication == nil) != (other.Authentication == nil) {
+ return false
+ } else if in.Authentication != nil {
+ if !in.Authentication.DeepEqual(other.Authentication) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *EndpointSelector) DeepEqual(other *EndpointSelector) bool {
+ if other == nil {
+ return false
+ }
+
+ if (in.LabelSelector == nil) != (other.LabelSelector == nil) {
+ return false
+ } else if in.LabelSelector != nil {
+ if !in.LabelSelector.DeepEqual(other.LabelSelector) {
+ return false
+ }
+ }
+
+ if (in.requirements == nil) != (other.requirements == nil) {
+ return false
+ } else if in.requirements != nil {
+ if !in.requirements.DeepEqual(other.requirements) {
+ return false
+ }
+ }
+
+ if in.cachedLabelSelectorString != other.cachedLabelSelectorString {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *EndpointSelectorSlice) DeepEqual(other *EndpointSelectorSlice) bool {
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *EntitySlice) DeepEqual(other *EntitySlice) bool {
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if inElement != (*other)[i] {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *EnvoyConfig) DeepEqual(other *EnvoyConfig) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Kind != other.Kind {
+ return false
+ }
+ if in.Name != other.Name {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *FQDNSelector) DeepEqual(other *FQDNSelector) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.MatchName != other.MatchName {
+ return false
+ }
+ if in.MatchPattern != other.MatchPattern {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *FQDNSelectorSlice) DeepEqual(other *FQDNSelectorSlice) bool {
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *HeaderMatch) DeepEqual(other *HeaderMatch) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Mismatch != other.Mismatch {
+ return false
+ }
+ if in.Name != other.Name {
+ return false
+ }
+ if (in.Secret == nil) != (other.Secret == nil) {
+ return false
+ } else if in.Secret != nil {
+ if !in.Secret.DeepEqual(other.Secret) {
+ return false
+ }
+ }
+
+ if in.Value != other.Value {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *ICMPField) DeepEqual(other *ICMPField) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Family != other.Family {
+ return false
+ }
+ if in.Type != other.Type {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *ICMPRule) DeepEqual(other *ICMPRule) bool {
+ if other == nil {
+ return false
+ }
+
+ if ((in.Fields != nil) && (other.Fields != nil)) || ((in.Fields == nil) != (other.Fields == nil)) {
+ in, other := &in.Fields, &other.Fields
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *ICMPRules) DeepEqual(other *ICMPRules) bool {
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *IngressCommonRule) DeepEqual(other *IngressCommonRule) bool {
+ if other == nil {
+ return false
+ }
+
+ if ((in.FromEndpoints != nil) && (other.FromEndpoints != nil)) || ((in.FromEndpoints == nil) != (other.FromEndpoints == nil)) {
+ in, other := &in.FromEndpoints, &other.FromEndpoints
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ if ((in.FromRequires != nil) && (other.FromRequires != nil)) || ((in.FromRequires == nil) != (other.FromRequires == nil)) {
+ in, other := &in.FromRequires, &other.FromRequires
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ if ((in.FromCIDR != nil) && (other.FromCIDR != nil)) || ((in.FromCIDR == nil) != (other.FromCIDR == nil)) {
+ in, other := &in.FromCIDR, &other.FromCIDR
+ if other == nil || !in.DeepEqual(other) {
+ return false
+ }
+ }
+
+ if ((in.FromCIDRSet != nil) && (other.FromCIDRSet != nil)) || ((in.FromCIDRSet == nil) != (other.FromCIDRSet == nil)) {
+ in, other := &in.FromCIDRSet, &other.FromCIDRSet
+ if other == nil || !in.DeepEqual(other) {
+ return false
+ }
+ }
+
+ if ((in.FromEntities != nil) && (other.FromEntities != nil)) || ((in.FromEntities == nil) != (other.FromEntities == nil)) {
+ in, other := &in.FromEntities, &other.FromEntities
+ if other == nil || !in.DeepEqual(other) {
+ return false
+ }
+ }
+
+ if ((in.aggregatedSelectors != nil) && (other.aggregatedSelectors != nil)) || ((in.aggregatedSelectors == nil) != (other.aggregatedSelectors == nil)) {
+ in, other := &in.aggregatedSelectors, &other.aggregatedSelectors
+ if other == nil || !in.DeepEqual(other) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *IngressDenyRule) DeepEqual(other *IngressDenyRule) bool {
+ if other == nil {
+ return false
+ }
+
+ if !in.IngressCommonRule.DeepEqual(&other.IngressCommonRule) {
+ return false
+ }
+
+ if ((in.ToPorts != nil) && (other.ToPorts != nil)) || ((in.ToPorts == nil) != (other.ToPorts == nil)) {
+ in, other := &in.ToPorts, &other.ToPorts
+ if other == nil || !in.DeepEqual(other) {
+ return false
+ }
+ }
+
+ if ((in.ICMPs != nil) && (other.ICMPs != nil)) || ((in.ICMPs == nil) != (other.ICMPs == nil)) {
+ in, other := &in.ICMPs, &other.ICMPs
+ if other == nil || !in.DeepEqual(other) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *IngressRule) DeepEqual(other *IngressRule) bool {
+ if other == nil {
+ return false
+ }
+
+ if !in.IngressCommonRule.DeepEqual(&other.IngressCommonRule) {
+ return false
+ }
+
+ if ((in.ToPorts != nil) && (other.ToPorts != nil)) || ((in.ToPorts == nil) != (other.ToPorts == nil)) {
+ in, other := &in.ToPorts, &other.ToPorts
+ if other == nil || !in.DeepEqual(other) {
+ return false
+ }
+ }
+
+ if ((in.ICMPs != nil) && (other.ICMPs != nil)) || ((in.ICMPs == nil) != (other.ICMPs == nil)) {
+ in, other := &in.ICMPs, &other.ICMPs
+ if other == nil || !in.DeepEqual(other) {
+ return false
+ }
+ }
+
+ if (in.Authentication == nil) != (other.Authentication == nil) {
+ return false
+ } else if in.Authentication != nil {
+ if !in.Authentication.DeepEqual(other.Authentication) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *K8sServiceNamespace) DeepEqual(other *K8sServiceNamespace) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.ServiceName != other.ServiceName {
+ return false
+ }
+ if in.Namespace != other.Namespace {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *K8sServiceSelectorNamespace) DeepEqual(other *K8sServiceSelectorNamespace) bool {
+ if other == nil {
+ return false
+ }
+
+ if !in.Selector.DeepEqual(&other.Selector) {
+ return false
+ }
+
+ if in.Namespace != other.Namespace {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *L7Rules) DeepEqual(other *L7Rules) bool {
+ if other == nil {
+ return false
+ }
+
+ if ((in.HTTP != nil) && (other.HTTP != nil)) || ((in.HTTP == nil) != (other.HTTP == nil)) {
+ in, other := &in.HTTP, &other.HTTP
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ if ((in.Kafka != nil) && (other.Kafka != nil)) || ((in.Kafka == nil) != (other.Kafka == nil)) {
+ in, other := &in.Kafka, &other.Kafka
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ if ((in.DNS != nil) && (other.DNS != nil)) || ((in.DNS == nil) != (other.DNS == nil)) {
+ in, other := &in.DNS, &other.DNS
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ if in.L7Proto != other.L7Proto {
+ return false
+ }
+ if ((in.L7 != nil) && (other.L7 != nil)) || ((in.L7 == nil) != (other.L7 == nil)) {
+ in, other := &in.L7, &other.L7
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *Listener) DeepEqual(other *Listener) bool {
+ if other == nil {
+ return false
+ }
+
+ if (in.EnvoyConfig == nil) != (other.EnvoyConfig == nil) {
+ return false
+ } else if in.EnvoyConfig != nil {
+ if !in.EnvoyConfig.DeepEqual(other.EnvoyConfig) {
+ return false
+ }
+ }
+
+ if in.Name != other.Name {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *PortDenyRule) DeepEqual(other *PortDenyRule) bool {
+ if other == nil {
+ return false
+ }
+
+ if ((in.Ports != nil) && (other.Ports != nil)) || ((in.Ports == nil) != (other.Ports == nil)) {
+ in, other := &in.Ports, &other.Ports
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *PortDenyRules) DeepEqual(other *PortDenyRules) bool {
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *PortProtocol) DeepEqual(other *PortProtocol) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Port != other.Port {
+ return false
+ }
+ if in.Protocol != other.Protocol {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *PortRule) DeepEqual(other *PortRule) bool {
+ if other == nil {
+ return false
+ }
+
+ if ((in.Ports != nil) && (other.Ports != nil)) || ((in.Ports == nil) != (other.Ports == nil)) {
+ in, other := &in.Ports, &other.Ports
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ if (in.TerminatingTLS == nil) != (other.TerminatingTLS == nil) {
+ return false
+ } else if in.TerminatingTLS != nil {
+ if !in.TerminatingTLS.DeepEqual(other.TerminatingTLS) {
+ return false
+ }
+ }
+
+ if (in.OriginatingTLS == nil) != (other.OriginatingTLS == nil) {
+ return false
+ } else if in.OriginatingTLS != nil {
+ if !in.OriginatingTLS.DeepEqual(other.OriginatingTLS) {
+ return false
+ }
+ }
+
+ if ((in.ServerNames != nil) && (other.ServerNames != nil)) || ((in.ServerNames == nil) != (other.ServerNames == nil)) {
+ in, other := &in.ServerNames, &other.ServerNames
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if inElement != (*other)[i] {
+ return false
+ }
+ }
+ }
+ }
+
+ if (in.Listener == nil) != (other.Listener == nil) {
+ return false
+ } else if in.Listener != nil {
+ if !in.Listener.DeepEqual(other.Listener) {
+ return false
+ }
+ }
+
+ if (in.Rules == nil) != (other.Rules == nil) {
+ return false
+ } else if in.Rules != nil {
+ if !in.Rules.DeepEqual(other.Rules) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *PortRuleDNS) DeepEqual(other *PortRuleDNS) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.MatchName != other.MatchName {
+ return false
+ }
+ if in.MatchPattern != other.MatchPattern {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *PortRuleHTTP) DeepEqual(other *PortRuleHTTP) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Path != other.Path {
+ return false
+ }
+ if in.Method != other.Method {
+ return false
+ }
+ if in.Host != other.Host {
+ return false
+ }
+ if ((in.Headers != nil) && (other.Headers != nil)) || ((in.Headers == nil) != (other.Headers == nil)) {
+ in, other := &in.Headers, &other.Headers
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if inElement != (*other)[i] {
+ return false
+ }
+ }
+ }
+ }
+
+ if ((in.HeaderMatches != nil) && (other.HeaderMatches != nil)) || ((in.HeaderMatches == nil) != (other.HeaderMatches == nil)) {
+ in, other := &in.HeaderMatches, &other.HeaderMatches
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual((*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *PortRuleL7) DeepEqual(other *PortRuleL7) bool {
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for key, inValue := range *in {
+ if otherValue, present := (*other)[key]; !present {
+ return false
+ } else {
+ if inValue != otherValue {
+ return false
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *PortRules) DeepEqual(other *PortRules) bool {
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+
+// deepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *Rule) deepEqual(other *Rule) bool {
+ if other == nil {
+ return false
+ }
+
+ if !in.EndpointSelector.DeepEqual(&other.EndpointSelector) {
+ return false
+ }
+
+ if !in.NodeSelector.DeepEqual(&other.NodeSelector) {
+ return false
+ }
+
+ if ((in.Ingress != nil) && (other.Ingress != nil)) || ((in.Ingress == nil) != (other.Ingress == nil)) {
+ in, other := &in.Ingress, &other.Ingress
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ if ((in.IngressDeny != nil) && (other.IngressDeny != nil)) || ((in.IngressDeny == nil) != (other.IngressDeny == nil)) {
+ in, other := &in.IngressDeny, &other.IngressDeny
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ if ((in.Egress != nil) && (other.Egress != nil)) || ((in.Egress == nil) != (other.Egress == nil)) {
+ in, other := &in.Egress, &other.Egress
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ if ((in.EgressDeny != nil) && (other.EgressDeny != nil)) || ((in.EgressDeny == nil) != (other.EgressDeny == nil)) {
+ in, other := &in.EgressDeny, &other.EgressDeny
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual(&(*other)[i]) {
+ return false
+ }
+ }
+ }
+ }
+
+ if ((in.Labels != nil) && (other.Labels != nil)) || ((in.Labels == nil) != (other.Labels == nil)) {
+ in, other := &in.Labels, &other.Labels
+ if other == nil || !in.DeepEqual(other) {
+ return false
+ }
+ }
+
+ if in.Description != other.Description {
+ return false
+ }
+
+ return true
+}
+
+// deepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *Rules) deepEqual(other *Rules) bool {
+ if other == nil {
+ return false
+ }
+
+ if len(*in) != len(*other) {
+ return false
+ } else {
+ for i, inElement := range *in {
+ if !inElement.DeepEqual((*other)[i]) {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *Secret) DeepEqual(other *Secret) bool {
+ if other == nil {
+ return false
+ }
+
+ if in.Namespace != other.Namespace {
+ return false
+ }
+ if in.Name != other.Name {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *Service) DeepEqual(other *Service) bool {
+ if other == nil {
+ return false
+ }
+
+ if (in.K8sServiceSelector == nil) != (other.K8sServiceSelector == nil) {
+ return false
+ } else if in.K8sServiceSelector != nil {
+ if !in.K8sServiceSelector.DeepEqual(other.K8sServiceSelector) {
+ return false
+ }
+ }
+
+ if (in.K8sService == nil) != (other.K8sService == nil) {
+ return false
+ } else if in.K8sService != nil {
+ if !in.K8sService.DeepEqual(other.K8sService) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *ServiceSelector) DeepEqual(other *ServiceSelector) bool {
+ if other == nil {
+ return false
+ }
+
+ if (in.LabelSelector == nil) != (other.LabelSelector == nil) {
+ return false
+ } else if in.LabelSelector != nil {
+ if !in.LabelSelector.DeepEqual(other.LabelSelector) {
+ return false
+ }
+ }
+
+ if (in.requirements == nil) != (other.requirements == nil) {
+ return false
+ } else if in.requirements != nil {
+ if !in.requirements.DeepEqual(other.requirements) {
+ return false
+ }
+ }
+
+ if in.cachedLabelSelectorString != other.cachedLabelSelectorString {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *TLSContext) DeepEqual(other *TLSContext) bool {
+ if other == nil {
+ return false
+ }
+
+ if (in.Secret == nil) != (other.Secret == nil) {
+ return false
+ } else if in.Secret != nil {
+ if !in.Secret.DeepEqual(other.Secret) {
+ return false
+ }
+ }
+
+ if in.TrustedCA != other.TrustedCA {
+ return false
+ }
+ if in.Certificate != other.Certificate {
+ return false
+ }
+ if in.PrivateKey != other.PrivateKey {
+ return false
+ }
+
+ return true
+}
+
+// DeepEqual is an autogenerated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+func (in *ToGroups) DeepEqual(other *ToGroups) bool {
+ if other == nil {
+ return false
+ }
+
+ if (in.AWS == nil) != (other.AWS == nil) {
+ return false
+ } else if in.AWS != nil {
+ if !in.AWS.DeepEqual(other.AWS) {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/cidr.go b/vendor/github.com/cilium/cilium/pkg/policy/cidr.go
new file mode 100644
index 000000000..33f14a9cd
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/cidr.go
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package policy
+
+import (
+ "net/netip"
+
+ "github.com/cilium/cilium/pkg/ip"
+ "github.com/cilium/cilium/pkg/policy/api"
+)
+
+// getPrefixesFromCIDR fetches all CIDRs referred to by the specified slice
+// and returns them as regular golang CIDR objects.
+func getPrefixesFromCIDR(cidrs api.CIDRSlice) []netip.Prefix {
+ result, _, _ := ip.ParsePrefixes(cidrs.StringSlice())
+ return result
+}
+
+// GetPrefixesFromCIDRSet fetches all CIDRs referred to by the specified slice
+// and returns them as regular golang CIDR objects.
+//
+// Assumes that validation already occurred on 'rules'.
+func GetPrefixesFromCIDRSet(rules api.CIDRRuleSlice) []netip.Prefix {
+ cidrs := api.ComputeResultantCIDRSet(rules)
+ return getPrefixesFromCIDR(cidrs)
+}
+
+// GetCIDRPrefixes runs through the specified 'rules' to find every reference
+// to a CIDR in the rules, and returns a slice containing all of these CIDRs.
+// Multiple rules referring to the same CIDR will result in multiple copies of
+// the CIDR in the returned slice.
+//
+// Assumes that validation already occurred on 'rules'.
+func GetCIDRPrefixes(rules api.Rules) []netip.Prefix {
+ if len(rules) == 0 {
+ return nil
+ }
+ res := make([]netip.Prefix, 0, 32)
+ for _, r := range rules {
+ for _, ir := range r.Ingress {
+ if len(ir.FromCIDR) > 0 {
+ res = append(res, getPrefixesFromCIDR(ir.FromCIDR)...)
+ }
+ if len(ir.FromCIDRSet) > 0 {
+ res = append(res, GetPrefixesFromCIDRSet(ir.FromCIDRSet)...)
+ }
+ }
+ for _, ir := range r.IngressDeny {
+ if len(ir.FromCIDR) > 0 {
+ res = append(res, getPrefixesFromCIDR(ir.FromCIDR)...)
+ }
+ if len(ir.FromCIDRSet) > 0 {
+ res = append(res, GetPrefixesFromCIDRSet(ir.FromCIDRSet)...)
+ }
+ }
+ for _, er := range r.Egress {
+ if len(er.ToCIDR) > 0 {
+ res = append(res, getPrefixesFromCIDR(er.ToCIDR)...)
+ }
+ if len(er.ToCIDRSet) > 0 {
+ res = append(res, GetPrefixesFromCIDRSet(er.ToCIDRSet)...)
+ }
+ }
+ for _, er := range r.EgressDeny {
+ if len(er.ToCIDR) > 0 {
+ res = append(res, getPrefixesFromCIDR(er.ToCIDR)...)
+ }
+ if len(er.ToCIDRSet) > 0 {
+ res = append(res, GetPrefixesFromCIDRSet(er.ToCIDRSet)...)
+ }
+ }
+ }
+ return res
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/config.go b/vendor/github.com/cilium/cilium/pkg/policy/config.go
new file mode 100644
index 000000000..77f035cb0
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/config.go
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package policy
+
+import (
+ "time"
+
+ ipcacheTypes "github.com/cilium/cilium/pkg/ipcache/types"
+ "github.com/cilium/cilium/pkg/labels"
+ "github.com/cilium/cilium/pkg/lock"
+ "github.com/cilium/cilium/pkg/logging"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+ "github.com/cilium/cilium/pkg/source"
+)
+
+var (
+ log = logging.DefaultLogger.WithField(logfields.LogSubsys, "policy")
+ mutex lock.RWMutex // Protects enablePolicy
+ enablePolicy string // Whether policy enforcement is enabled.
+)
+
+// SetPolicyEnabled sets the policy enablement configuration. Valid values are:
+// - endpoint.AlwaysEnforce
+// - endpoint.NeverEnforce
+// - endpoint.DefaultEnforcement
+func SetPolicyEnabled(val string) {
+ mutex.Lock()
+ enablePolicy = val
+ mutex.Unlock()
+}
+
+// GetPolicyEnabled returns the policy enablement configuration
+func GetPolicyEnabled() string {
+ mutex.RLock()
+ val := enablePolicy
+ mutex.RUnlock()
+ return val
+}
+
+// AddOptions are options which can be passed to PolicyAdd
+type AddOptions struct {
+ // Replace if true indicates that existing rules with identical labels should be replaced
+ Replace bool
+ // ReplaceWithLabels if present indicates that existing rules with the
+ // given LabelArray should be deleted.
+ ReplaceWithLabels labels.LabelArray
+ // Generated should be set to true to signal that the policy being inserted
+ // was generated by cilium-agent, e.g. dns poller.
+ Generated bool
+
+ // The source of this policy, one of api, fqdn or k8s
+ Source source.Source
+
+ // The time the policy initially began to be processed in Cilium, such as when the
+ // policy was received from the API server.
+ ProcessingStartTime time.Time
+
+ // Resource provides the object ID for the underlying object that backs
+ // this information from 'source'.
+ Resource ipcacheTypes.ResourceID
+}
+
+// DeleteOptions are options which can be passed to PolicyDelete
+type DeleteOptions struct {
+ // The source of this policy, one of api, fqdn or k8s
+ Source source.Source
+
+ // Resource provides the object ID for the underlying object that backs
+ // this information from 'source'.
+ Resource ipcacheTypes.ResourceID
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/distillery.go b/vendor/github.com/cilium/cilium/pkg/policy/distillery.go
new file mode 100644
index 000000000..408d478ad
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/distillery.go
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package policy
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ identityPkg "github.com/cilium/cilium/pkg/identity"
+ "github.com/cilium/cilium/pkg/identity/identitymanager"
+ "github.com/cilium/cilium/pkg/lock"
+)
+
+// SelectorPolicy represents a cached selectorPolicy, previously resolved from
+// the policy repository and ready to be distilled against a set of identities
+// to compute datapath-level policy configuration.
+type SelectorPolicy interface {
+ // Consume returns the policy in terms of connectivity to peer
+ // Identities.
+ Consume(owner PolicyOwner) *EndpointPolicy
+}
+
+// PolicyCache represents a cache of resolved policies for identities.
+type PolicyCache struct {
+ lock.Mutex
+
+ // repo is a circular reference back to the Repository, but as
+ // we create only one Repository and one PolicyCache for each
+ // Cilium Agent process, these will never need to be garbage
+ // collected.
+ repo *Repository
+ policies map[identityPkg.NumericIdentity]*cachedSelectorPolicy
+}
+
+// NewPolicyCache creates a new cache of SelectorPolicy.
+func NewPolicyCache(repo *Repository, subscribe bool) *PolicyCache {
+ cache := &PolicyCache{
+ repo: repo,
+ policies: make(map[identityPkg.NumericIdentity]*cachedSelectorPolicy),
+ }
+ if subscribe {
+ identitymanager.Subscribe(cache)
+ }
+ return cache
+}
+
+func (cache *PolicyCache) GetSelectorCache() *SelectorCache {
+ return cache.repo.GetSelectorCache()
+}
+
+// lookupOrCreate adds the specified Identity to the policy cache, with a reference
+// from the specified Endpoint, then returns the threadsafe copy of the policy.
+func (cache *PolicyCache) lookupOrCreate(identity *identityPkg.Identity, create bool) SelectorPolicy {
+ cache.Lock()
+ defer cache.Unlock()
+ cip, ok := cache.policies[identity.ID]
+ if create && !ok {
+ cip = newCachedSelectorPolicy(identity, cache.repo.GetSelectorCache())
+ cache.policies[identity.ID] = cip
+ }
+ return cip
+}
+
+// insert adds the specified Identity to the policy cache, with a reference
+// from the specified Endpoint, then returns the threadsafe copy of the policy.
+func (cache *PolicyCache) insert(identity *identityPkg.Identity) SelectorPolicy {
+ return cache.lookupOrCreate(identity, true)
+}
+
+// delete forgets about any cached SelectorPolicy that this endpoint uses.
+//
+// Returns true if the SelectorPolicy was removed from the cache.
+func (cache *PolicyCache) delete(identity *identityPkg.Identity) bool {
+ cache.Lock()
+ defer cache.Unlock()
+ cip, ok := cache.policies[identity.ID]
+ if ok {
+ delete(cache.policies, identity.ID)
+ cip.getPolicy().Detach()
+ }
+ return ok
+}
+
+// updateSelectorPolicy resolves the policy for the security identity of the
+// specified endpoint and stores it internally. It will skip policy resolution
+// if the cached policy is already at the revision specified in the repo.
+//
+// Returns whether the cache was updated, or an error.
+//
+// Must be called with repo.Mutex held for reading.
+func (cache *PolicyCache) updateSelectorPolicy(identity *identityPkg.Identity) (bool, error) {
+ cache.Lock()
+ cip, ok := cache.policies[identity.ID]
+ cache.Unlock()
+ if !ok {
+ return false, fmt.Errorf("SelectorPolicy not found in cache for ID %d", identity.ID)
+ }
+
+ // As long as UpdatePolicy() is triggered from endpoint
+ // regeneration, it's possible for two endpoints with the
+ // *same* identity to race to update the policy here. Such
+ // racing would lead to first of the endpoints using a
+ // selectorPolicy that is already detached from the selector
+ // cache, and thus not getting any incremental updates.
+ //
+ // Lock the 'cip' for the duration of the revision check and
+ // the possible policy update.
+ cip.Lock()
+ defer cip.Unlock()
+
+ // Don't resolve policy if it was already done for this or later revision.
+ if cip.getPolicy().Revision >= cache.repo.GetRevision() {
+ return false, nil
+ }
+
+ // Resolve the policies, which could fail
+ selPolicy, err := cache.repo.resolvePolicyLocked(identity)
+ if err != nil {
+ return false, err
+ }
+
+ cip.setPolicy(selPolicy)
+
+ return true, nil
+}
+
+// LocalEndpointIdentityAdded creates a SelectorPolicy cache entry for the
+// specified Identity, without calculating any policy for it.
+func (cache *PolicyCache) LocalEndpointIdentityAdded(identity *identityPkg.Identity) {
+ cache.insert(identity)
+}
+
+// LocalEndpointIdentityRemoved deletes the cached SelectorPolicy for the
+// specified Identity.
+func (cache *PolicyCache) LocalEndpointIdentityRemoved(identity *identityPkg.Identity) {
+ cache.delete(identity)
+}
+
+// Lookup attempts to locate the SelectorPolicy corresponding to the specified
+// identity. If policy is not cached for the identity, it returns nil.
+func (cache *PolicyCache) Lookup(identity *identityPkg.Identity) SelectorPolicy {
+ return cache.lookupOrCreate(identity, false)
+}
+
+// UpdatePolicy resolves the policy for the security identity of the specified
+// endpoint and caches it for future use.
+//
+// The caller must provide threadsafety for iteration over the policy
+// repository.
+func (cache *PolicyCache) UpdatePolicy(identity *identityPkg.Identity) error {
+ _, err := cache.updateSelectorPolicy(identity)
+ return err
+}
+
+// GetAuthTypes returns the AuthTypes required by the policy between the localID and remoteID, if
+// any, otherwise returns nil.
+func (cache *PolicyCache) GetAuthTypes(localID, remoteID identityPkg.NumericIdentity) AuthTypes {
+ cache.Lock()
+ cip, ok := cache.policies[localID]
+ cache.Unlock()
+ if !ok {
+ return nil // No policy for localID (no endpoint with localID)
+ }
+
+ // SelectorPolicy is const after it has been created, so no locking needed to access it
+ selPolicy := cip.getPolicy()
+
+ var resTypes AuthTypes
+ for cs, authTypes := range selPolicy.L4Policy.AuthMap {
+ missing := false
+ for authType := range authTypes {
+ if _, exists := resTypes[authType]; !exists {
+ missing = true
+ break
+ }
+ }
+ // Only check if 'cs' selects 'remoteID' if one of the authTypes is still missing
+ // from the result
+ if missing && cs.Selects(remoteID) {
+ if resTypes == nil {
+ resTypes = make(AuthTypes, 1)
+ }
+ for authType := range authTypes {
+ resTypes[authType] = struct{}{}
+ }
+ }
+ }
+ return resTypes
+}
+
+// cachedSelectorPolicy is a wrapper around a selectorPolicy (stored in the
+// 'policy' field). It is always nested directly in the owning policyCache,
+// and is protected against concurrent writes via the policyCache mutex.
+type cachedSelectorPolicy struct {
+ lock.Mutex // lock is needed to synchronize parallel policy updates
+
+ identity *identityPkg.Identity
+ policy atomic.Pointer[selectorPolicy]
+}
+
+func newCachedSelectorPolicy(identity *identityPkg.Identity, selectorCache *SelectorCache) *cachedSelectorPolicy {
+ cip := &cachedSelectorPolicy{
+ identity: identity,
+ }
+ cip.setPolicy(newSelectorPolicy(selectorCache))
+ return cip
+}
+
+// getPolicy returns a reference to the selectorPolicy that is cached.
+//
+// Users should treat the result as immutable state that MUST NOT be modified.
+func (cip *cachedSelectorPolicy) getPolicy() *selectorPolicy {
+ return cip.policy.Load()
+}
+
+// setPolicy updates the reference to the SelectorPolicy that is cached.
+// Calls Detach() on the old policy, if any.
+func (cip *cachedSelectorPolicy) setPolicy(policy *selectorPolicy) {
+ oldPolicy := cip.policy.Swap(policy)
+ if oldPolicy != nil {
+ // Release the references the previous policy holds on the selector cache.
+ oldPolicy.Detach()
+ }
+}
+
+// Consume returns the EndpointPolicy that defines connectivity policy to
+// Identities in the specified cache.
+//
+// This denotes that a particular endpoint is 'consuming' the policy from the
+// selector policy cache.
+func (cip *cachedSelectorPolicy) Consume(owner PolicyOwner) *EndpointPolicy {
+ // TODO: This currently computes the EndpointPolicy from SelectorPolicy
+ // on-demand, however in future the cip is intended to cache the
+ // EndpointPolicy for this Identity and emit datapath deltas instead.
+ isHost := cip.identity.ID == identityPkg.ReservedIdentityHost
+ return cip.getPolicy().DistillPolicy(owner, isHost)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/identifier.go b/vendor/github.com/cilium/cilium/pkg/policy/identifier.go
new file mode 100644
index 000000000..6479c34b3
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/identifier.go
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package policy
+
+import (
+ "sync"
+
+ "github.com/cilium/cilium/pkg/identity"
+ "github.com/cilium/cilium/pkg/lock"
+)
+
+// Endpoint refers to any structure which has the following properties:
+// * a node-local ID stored as a uint16
+// * a security identity
+// * a means of incrementing its policy revision
+// * a means of checking if it represents a node or a pod.
+// * a set of labels
+// * a kubernetes namespace
+type Endpoint interface {
+ GetID16() uint16
+ GetSecurityIdentity() (*identity.Identity, error)
+ PolicyRevisionBumpEvent(rev uint64)
+ IsHost() bool
+ GetOpLabels() []string
+ GetK8sNamespace() string
+}
+
+// EndpointSet is used to be able to group together a given set of Endpoints
+// that need to have a specific operation performed upon them (e.g., policy
+// revision updates).
+type EndpointSet struct {
+ mutex lock.RWMutex
+ endpoints map[Endpoint]struct{}
+}
+
+// NewEndpointSet returns an EndpointSet with the given Endpoints map
+func NewEndpointSet(m map[Endpoint]struct{}) *EndpointSet {
+ if m != nil {
+ return &EndpointSet{
+ endpoints: m,
+ }
+ }
+ return &EndpointSet{
+ endpoints: map[Endpoint]struct{}{},
+ }
+}
+
+// ForEachGo runs epFunc asynchronously inside a goroutine for each endpoint in
+// the EndpointSet. It signals to the provided WaitGroup when epFunc has been
+// executed for each endpoint.
+func (e *EndpointSet) ForEachGo(wg *sync.WaitGroup, epFunc func(epp Endpoint)) {
+ e.mutex.RLock()
+ defer e.mutex.RUnlock()
+
+ wg.Add(len(e.endpoints))
+
+ for ep := range e.endpoints {
+ go func(eppp Endpoint) {
+ epFunc(eppp)
+ wg.Done()
+ }(ep)
+ }
+}
+
+// Delete removes ep from the EndpointSet.
+func (e *EndpointSet) Delete(ep Endpoint) {
+ e.mutex.Lock()
+ delete(e.endpoints, ep)
+ e.mutex.Unlock()
+}
+
+// Insert adds ep to the EndpointSet.
+func (e *EndpointSet) Insert(ep Endpoint) {
+ e.mutex.Lock()
+ e.endpoints[ep] = struct{}{}
+ e.mutex.Unlock()
+}
+
+// Len returns the number of elements in the EndpointSet.
+func (e *EndpointSet) Len() (nElem int) {
+ e.mutex.RLock()
+ nElem = len(e.endpoints)
+ e.mutex.RUnlock()
+ return
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/l4.go b/vendor/github.com/cilium/cilium/pkg/policy/l4.go
new file mode 100644
index 000000000..7c329a74f
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/l4.go
@@ -0,0 +1,1384 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package policy
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strconv"
+ "sync/atomic"
+
+ cilium "github.com/cilium/proxy/go/cilium/api"
+ "github.com/sirupsen/logrus"
+
+ "github.com/cilium/cilium/api/v1/models"
+ "github.com/cilium/cilium/pkg/iana"
+ "github.com/cilium/cilium/pkg/identity"
+ "github.com/cilium/cilium/pkg/labels"
+ "github.com/cilium/cilium/pkg/lock"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+ "github.com/cilium/cilium/pkg/option"
+ "github.com/cilium/cilium/pkg/policy/api"
+ "github.com/cilium/cilium/pkg/policy/trafficdirection"
+ "github.com/cilium/cilium/pkg/u8proto"
+)
+
+// covers returns true if 'l4rule' has the effect needed for the 'l3l4rule', when 'l4rule' is added
+// to the datapath, due to the l4-only rule matching if l3l4-rule is not present. This determination
+// can be done here only when both rules have the same port number (or both have a wildcarded port).
+func (l4rule *PerSelectorPolicy) covers(l3l4rule *PerSelectorPolicy) bool {
+ // Deny takes highest precedence so it is dealt with first
+ if l4rule != nil && l4rule.IsDeny {
+ // l4-only deny takes precedence
+ return true
+ } else if l3l4rule != nil && l3l4rule.IsDeny {
+ // Must not skip if l3l4 rule is deny while l4-only rule is not
+ return false
+ }
+
+ // Can not skip if currentRule has an explicit auth type and wildcardRule does not or if
+ // both have different auth types. In all other cases the auth type from the wildcardRule
+ // can be used also for the current rule.
+ // Note that the caller must deal with inheriting redirect from wildcardRule to currentRule,
+ // if any.
+ cHasAuth, cAuthType := l3l4rule.GetAuthType()
+ wHasAuth, wAuthType := l4rule.GetAuthType()
+ if cHasAuth && !wHasAuth || cHasAuth && wHasAuth && cAuthType != wAuthType {
+ return false
+ }
+
+ l3l4IsRedirect := l3l4rule.IsRedirect()
+ l4OnlyIsRedirect := l4rule.IsRedirect()
+ if l3l4IsRedirect && !l4OnlyIsRedirect {
+ // Can not skip if l3l4-rule is redirect while l4-only is not
+ return false
+ }
+
+ // else can skip
+ return true
+}
+
+// TLS context holds the secret values resolved from an 'api.TLSContext'
+type TLSContext struct {
+ TrustedCA string `json:"trustedCA,omitempty"`
+ CertificateChain string `json:"certificateChain,omitempty"`
+ PrivateKey string `json:"privateKey,omitempty"`
+}
+
+// Equal returns true if 'a' and 'b' have the same contents.
+func (a *TLSContext) Equal(b *TLSContext) bool {
+ return a == nil && b == nil || a != nil && b != nil && *a == *b
+}
+
+// MarshalJSON marsahls a redacted version of the TLSContext. We want
+// to see which fields are present, but not reveal their values in any
+// logs, etc.
+func (t *TLSContext) MarshalJSON() ([]byte, error) {
+ type tlsContext TLSContext
+ var redacted tlsContext
+ if t.TrustedCA != "" {
+ redacted.TrustedCA = "[redacted]"
+ }
+ if t.CertificateChain != "" {
+ redacted.CertificateChain = "[redacted]"
+ }
+ if t.PrivateKey != "" {
+ redacted.PrivateKey = "[redacted]"
+ }
+ return json.Marshal(&redacted)
+}
+
+type StringSet map[string]struct{}
+
+func (a StringSet) Equal(b StringSet) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for k := range a {
+ if _, exists := b[k]; !exists {
+ return false
+ }
+ }
+ return true
+}
+
+// NewStringSet returns a StringSet initialized from slice of strings.
+// Returns nil for an empty slice
+func NewStringSet(from []string) StringSet {
+ if len(from) == 0 {
+ return nil
+ }
+ set := make(StringSet, len(from))
+ for _, s := range from {
+ set[s] = struct{}{}
+ }
+ return set
+}
+
+// Merge returns StringSet with strings from both a and b.
+// Returns a or b, possibly with modifications.
+func (a StringSet) Merge(b StringSet) StringSet {
+ if len(a) == 0 {
+ return b
+ }
+ for s := range b {
+ a[s] = struct{}{}
+ }
+ return a
+}
+
+// PerSelectorPolicy contains policy rules for a CachedSelector, i.e. for a
+// selection of numerical identities.
+type PerSelectorPolicy struct {
+ // TerminatingTLS is the TLS context for the connection terminated by
+ // the L7 proxy. For egress policy this specifies the server-side TLS
+ // parameters to be applied on the connections originated from the local
+ // POD and terminated by the L7 proxy. For ingress policy this specifies
+ // the server-side TLS parameters to be applied on the connections
+ // originated from a remote source and terminated by the L7 proxy.
+ TerminatingTLS *TLSContext `json:"terminatingTLS,omitempty"`
+
+ // OriginatingTLS is the TLS context for the connections originated by
+ // the L7 proxy. For egress policy this specifies the client-side TLS
+ // parameters for the upstream connection originating from the L7 proxy
+ // to the remote destination. For ingress policy this specifies the
+ // client-side TLS parameters for the connection from the L7 proxy to
+ // the local POD.
+ OriginatingTLS *TLSContext `json:"originatingTLS,omitempty"`
+
+ // ServerNames is a list of allowed TLS SNI values. If not empty, then
+ // TLS must be present and one of the provided SNIs must be indicated in the
+ // TLS handshake.
+ ServerNames StringSet `json:"serverNames,omitempty"`
+
+ // isRedirect is 'true' when traffic must be redirected
+ isRedirect bool `json:"-"`
+
+ // Pre-computed HTTP rules, computed after rule merging is complete
+ EnvoyHTTPRules *cilium.HttpNetworkPolicyRules `json:"-"`
+
+ // CanShortCircuit is true if all 'EnvoyHTTPRules' may be
+ // short-circuited by other matches.
+ CanShortCircuit bool `json:"-"`
+
+ api.L7Rules
+
+ // Authentication is the kind of cryptographic authentication required for the traffic to be allowed
+ // at L3, if any.
+ Authentication *api.Authentication `json:"auth,omitempty"`
+
+ // IsDeny is set if this L4Filter contains should be denied
+ IsDeny bool `json:",omitempty"`
+}
+
+// Equal returns true if 'a' and 'b' represent the same L7 Rules
+func (a *PerSelectorPolicy) Equal(b *PerSelectorPolicy) bool {
+ return a == nil && b == nil || a != nil && b != nil &&
+ a.TerminatingTLS.Equal(b.TerminatingTLS) &&
+ a.OriginatingTLS.Equal(b.OriginatingTLS) &&
+ a.ServerNames.Equal(b.ServerNames) &&
+ a.isRedirect == b.isRedirect &&
+ (a.Authentication == nil && b.Authentication == nil || a.Authentication != nil && a.Authentication.DeepEqual(b.Authentication)) &&
+ a.IsDeny == b.IsDeny &&
+ a.L7Rules.DeepEqual(&b.L7Rules)
+}
+
+// AuthType enumerates the supported authentication types in api.
+// Numerically higher type takes precedence in case of conflicting auth types.
+type AuthType uint8
+
+// AuthTypes is a set of AuthTypes, usually nil if empty
+type AuthTypes map[AuthType]struct{}
+
+// Authmap maps remote selectors to their needed AuthTypes, if any
+type AuthMap map[CachedSelector]AuthTypes
+
+const (
+ // AuthTypeDisabled means no authentication required
+ AuthTypeDisabled AuthType = iota
+ // AuthTypeSpire is a mutual auth type that uses SPIFFE identities with a SPIRE server
+ AuthTypeSpire
+ // AuthTypeAlwaysFail is a simple auth type that always denies the request
+ AuthTypeAlwaysFail
+)
+
+type HasAuthType bool
+
+const (
+ DefaultAuthType HasAuthType = false
+ ExplicitAuthType HasAuthType = true
+)
+
+// GetAuthType returns the AuthType of the L4Filter.
+func (a *PerSelectorPolicy) GetAuthType() (HasAuthType, AuthType) {
+ if a == nil {
+ return DefaultAuthType, AuthTypeDisabled
+ }
+ return GetAuthType(a.Authentication)
+}
+
+// GetAuthType returns boolean HasAuthType and AuthType for the api.Authentication
+// If there is no explicit auth type, (DefaultAuthType, AuthTypeDisabled) is returned
+func GetAuthType(auth *api.Authentication) (HasAuthType, AuthType) {
+ if auth == nil {
+ return DefaultAuthType, AuthTypeDisabled
+ }
+ switch auth.Mode {
+ case api.AuthenticationModeDisabled:
+ return ExplicitAuthType, AuthTypeDisabled
+ case api.AuthenticationModeRequired:
+ return ExplicitAuthType, AuthTypeSpire
+ case api.AuthenticationModeAlwaysFail:
+ return ExplicitAuthType, AuthTypeAlwaysFail
+ default:
+ return DefaultAuthType, AuthTypeDisabled
+ }
+}
+
+// Uint8 returns AuthType as a uint8
+func (a AuthType) Uint8() uint8 {
+ return uint8(a)
+}
+
+// String returns AuthType as a string
+// This must return the strings accepted for api.AuthType
+func (a AuthType) String() string {
+ switch a {
+ case AuthTypeDisabled:
+ return "disabled"
+ case AuthTypeSpire:
+ return "spire"
+ case AuthTypeAlwaysFail:
+ return "test-always-fail"
+ }
+ return "Unknown-auth-type-" + strconv.FormatUint(uint64(a.Uint8()), 10)
+}
+
+// IsRedirect returns true if the L7Rules are a redirect.
+func (a *PerSelectorPolicy) IsRedirect() bool {
+ return a != nil && a.isRedirect
+}
+
+// HasL7Rules returns whether the `L7Rules` contains any L7 rules.
+func (a *PerSelectorPolicy) HasL7Rules() bool {
+ return !a.L7Rules.IsEmpty()
+}
+
+// L7DataMap contains a map of L7 rules per endpoint where key is a CachedSelector
+type L7DataMap map[CachedSelector]*PerSelectorPolicy
+
+func (l7 L7DataMap) MarshalJSON() ([]byte, error) {
+ if len(l7) == 0 {
+ return []byte("[]"), nil
+ }
+
+ /* First, create a sorted slice of the selectors so we can get
+ * consistent JSON output */
+ selectors := make(CachedSelectorSlice, 0, len(l7))
+ for cs := range l7 {
+ selectors = append(selectors, cs)
+ }
+ sort.Sort(selectors)
+
+ /* Now we can iterate the slice and generate JSON entries. */
+ var err error
+ buffer := bytes.NewBufferString("[")
+ for _, cs := range selectors {
+ buffer.WriteString("{\"")
+ buffer.WriteString(cs.String())
+ buffer.WriteString("\":")
+ b, err := json.Marshal(l7[cs])
+ if err == nil {
+ buffer.Write(b)
+ } else {
+ buffer.WriteString("\"L7DataMap error: ")
+ buffer.WriteString(err.Error())
+ buffer.WriteString("\"")
+ }
+ buffer.WriteString("},")
+ }
+ buffer.Truncate(buffer.Len() - 1) // Drop the final ","
+ buffer.WriteString("]")
+
+ return buffer.Bytes(), err
+}
+
+// ShallowCopy returns a shallow copy of the L7DataMap.
+func (l7 L7DataMap) ShallowCopy() L7DataMap {
+ m := make(L7DataMap, len(l7))
+ for k, v := range l7 {
+ m[k] = v
+ }
+ return m
+}
+
+// L7ParserType is the type used to indicate what L7 parser to use.
+// Consts are defined for all well known L7 parsers.
+// Unknown string values are created for key-value pair policies, which
+// are then transparently used in redirect configuration.
+type L7ParserType string
+
+func (l7 L7ParserType) String() string {
+ return (string)(l7)
+}
+
+const (
+ // ParserTypeNone represents the case where no parser type is provided.
+ ParserTypeNone L7ParserType = ""
+ // ParserTypeTLS is used for TLS origination, termination, or SNI filtering without any L7
+ // parsing. If TLS policies are used with HTTP rules, ParserTypeHTTP is used instead.
+ ParserTypeTLS L7ParserType = "tls"
+ // ParserTypeCRD is used with a custom CiliumEnvoyConfig redirection. Incompatible with any
+ // parser type with L7 enforcement (HTTP, Kafka, proxylib), as the custom Listener generally
+ // does not support them.
+ ParserTypeCRD L7ParserType = "crd"
+ // ParserTypeHTTP specifies a HTTP parser type
+ ParserTypeHTTP L7ParserType = "http"
+ // ParserTypeKafka specifies a Kafka parser type
+ ParserTypeKafka L7ParserType = "kafka"
+ // ParserTypeDNS specifies a DNS parser type
+ ParserTypeDNS L7ParserType = "dns"
+)
+
+// redirectTypes is a bitmask of redirection types of multiple filters
+type redirectTypes uint16
+
+const (
+ // redirectTypeDNS bit is set when policy contains a redirection to DNS proxy
+ redirectTypeDNS redirectTypes = 1 << iota
+ // redirectTypeEnvoy bit is set when policy contains a redirection to Envoy
+ redirectTypeEnvoy
+ // redirectTypeProxylib bits are set when policy contains a redirection to Proxylib (via
+ // Envoy)
+ redirectTypeProxylib redirectTypes = 1< 0 || !rules.IsEmpty())
+ for epsel := range l7 {
+ l7policy := &PerSelectorPolicy{
+ TerminatingTLS: terminatingTLS,
+ OriginatingTLS: originatingTLS,
+ Authentication: auth,
+ IsDeny: deny,
+ ServerNames: NewStringSet(sni),
+ isRedirect: isRedirect,
+ }
+ if rules != nil {
+ l7policy.L7Rules = *rules
+ }
+ l7[epsel] = l7policy
+ }
+}
+
+type TLSDirection string
+
+const (
+ TerminatingTLS TLSDirection = "terminating"
+ OriginatingTLS TLSDirection = "originating"
+)
+
+func (l4 *L4Filter) getCerts(policyCtx PolicyContext, tls *api.TLSContext, direction TLSDirection) (*TLSContext, error) {
+ if tls == nil {
+ return nil, nil
+ }
+ ca, public, private, err := policyCtx.GetTLSContext(tls)
+ if err != nil {
+ log.WithError(err).Warningf("policy: Error getting %s TLS Context.", direction)
+ return nil, err
+ }
+ switch direction {
+ case TerminatingTLS:
+ if public == "" || private == "" {
+ return nil, fmt.Errorf("Terminating TLS context is missing certs.")
+ }
+ case OriginatingTLS:
+ if ca == "" {
+ return nil, fmt.Errorf("Originating TLS context is missing CA certs.")
+ }
+ default:
+ return nil, fmt.Errorf("invalid TLS direction: %s", direction)
+ }
+
+ return &TLSContext{
+ TrustedCA: ca,
+ CertificateChain: public,
+ PrivateKey: private,
+ }, nil
+}
+
+// createL4Filter creates a filter for L4 policy that applies to the specified
+// endpoints and port/protocol, with reference to the original rules that the
+// filter is derived from. This filter may be associated with a series of L7
+// rules via the `rule` parameter.
+// Not called with an empty peerEndpoints.
+func createL4Filter(policyCtx PolicyContext, peerEndpoints api.EndpointSelectorSlice, auth *api.Authentication, rule api.Ports, port api.PortProtocol,
+ protocol api.L4Proto, ruleLabels labels.LabelArray, ingress bool, fqdns api.FQDNSelectorSlice) (*L4Filter, error) {
+ selectorCache := policyCtx.GetSelectorCache()
+
+ portName := ""
+ p := uint64(0)
+ if iana.IsSvcName(port.Port) {
+ portName = port.Port
+ } else {
+ // already validated via PortRule.Validate()
+ p, _ = strconv.ParseUint(port.Port, 0, 16)
+ }
+
+ // already validated via L4Proto.Validate(), never "ANY"
+ u8p, _ := u8proto.ParseProtocol(string(protocol))
+
+ l4 := &L4Filter{
+ Port: int(p), // 0 for L3-only rules and named ports
+ PortName: portName, // non-"" for named ports
+ Protocol: protocol,
+ U8Proto: u8p,
+ PerSelectorPolicies: make(L7DataMap),
+ RuleOrigin: make(map[CachedSelector]labels.LabelArrayList), // Filled in below.
+ Ingress: ingress,
+ }
+
+ if peerEndpoints.SelectsAllEndpoints() {
+ l4.wildcard = l4.cacheIdentitySelector(api.WildcardEndpointSelector, ruleLabels, selectorCache)
+ } else {
+ l4.cacheIdentitySelectors(peerEndpoints, ruleLabels, selectorCache)
+ l4.cacheFQDNSelectors(fqdns, ruleLabels, selectorCache)
+ }
+
+ var terminatingTLS *TLSContext
+ var originatingTLS *TLSContext
+ var rules *api.L7Rules
+ var sni []string
+ forceRedirect := false
+ pr := rule.GetPortRule()
+ if pr != nil {
+ rules = pr.Rules
+ sni = pr.ServerNames
+
+ // Get TLS contexts, if any
+ var err error
+ terminatingTLS, err = l4.getCerts(policyCtx, pr.TerminatingTLS, TerminatingTLS)
+ if err != nil {
+ return nil, err
+ }
+ originatingTLS, err = l4.getCerts(policyCtx, pr.OriginatingTLS, OriginatingTLS)
+ if err != nil {
+ return nil, err
+ }
+
+ // Set parser type to TLS, if TLS. This will be overridden by L7 below, if rules
+ // exists.
+ if terminatingTLS != nil || originatingTLS != nil || len(pr.ServerNames) > 0 {
+ l4.L7Parser = ParserTypeTLS
+ }
+
+ // Determine L7ParserType from rules present. Earlier validation ensures rules
+ // for multiple protocols are not present here.
+ if rules != nil {
+ // we need this to redirect DNS UDP (or ANY, which is more useful)
+ if len(rules.DNS) > 0 {
+ l4.L7Parser = ParserTypeDNS
+ } else if protocol == api.ProtoTCP { // Other than DNS only support TCP
+ switch {
+ case len(rules.HTTP) > 0:
+ l4.L7Parser = ParserTypeHTTP
+ case len(rules.Kafka) > 0:
+ l4.L7Parser = ParserTypeKafka
+ case rules.L7Proto != "":
+ l4.L7Parser = (L7ParserType)(rules.L7Proto)
+ }
+ }
+ }
+
+ // Override the parser type to CRD is applicable.
+ if pr.Listener != nil {
+ l4.L7Parser = ParserTypeCRD
+ ns := policyCtx.GetNamespace()
+ resource := pr.Listener.EnvoyConfig
+ switch resource.Kind {
+ case "CiliumEnvoyConfig":
+ if ns == "" {
+ // Cluster-scoped CCNP tries to use namespaced
+ // CiliumEnvoyConfig
+ //
+ // TODO: Catch this in rule validation once we have a
+ // validation context in there so that we can differentiate
+ // between CNP and CCNP at validation time.
+ return nil, fmt.Errorf("Listener %q in CCNP can not use Kind CiliumEnvoyConfig", pr.Listener.Name)
+ }
+ case "CiliumClusterwideEnvoyConfig":
+ // CNP refers to a cluster-scoped listener
+ ns = ""
+ default:
+ }
+ l4.Listener = api.ResourceQualifiedName(ns, resource.Name, pr.Listener.Name, api.ForceNamespace)
+ forceRedirect = true
+ }
+ }
+
+ if l4.L7Parser != ParserTypeNone || auth != nil || policyCtx.IsDeny() {
+ l4.PerSelectorPolicies.addPolicyForSelector(rules, terminatingTLS, originatingTLS, auth, policyCtx.IsDeny(), sni, forceRedirect)
+ }
+
+ for cs := range l4.PerSelectorPolicies {
+ l4.RuleOrigin[cs] = labels.LabelArrayList{ruleLabels}
+ }
+
+ return l4, nil
+}
+
+func (l4 *L4Filter) removeSelectors(selectorCache *SelectorCache) {
+ selectors := make(CachedSelectorSlice, 0, len(l4.PerSelectorPolicies))
+ for cs := range l4.PerSelectorPolicies {
+ selectors = append(selectors, cs)
+ }
+ selectorCache.RemoveSelectors(selectors, l4)
+}
+
+// detach releases the references held in the L4Filter and must be called before
+// the filter is left to be garbage collected.
+// L4Filter may still be accessed concurrently after it has been detached.
+func (l4 *L4Filter) detach(selectorCache *SelectorCache) {
+ l4.removeSelectors(selectorCache)
+ l4.policy.Store(nil)
+}
+
+// attach signifies that the L4Filter is ready and reacheable for updates
+// from SelectorCache. L4Filter (and L4Policy) is read-only after this is called,
+// multiple goroutines will be reading the fields from that point on.
+func (l4 *L4Filter) attach(ctx PolicyContext, l4Policy *L4Policy) policyFeatures {
+ // All rules have been added to the L4Filter at this point.
+ // Sort the rules label array list for more efficient equality comparison.
+ for _, labels := range l4.RuleOrigin {
+ labels.Sort()
+ }
+
+ var features policyFeatures
+ for cs, cp := range l4.PerSelectorPolicies {
+ if cp != nil {
+ if cp.IsDeny {
+ features.setFeature(denyRules)
+ }
+
+ hasAuth, authType := GetAuthType(cp.Authentication)
+ if hasAuth {
+ features.setFeature(authRules)
+
+ if authType != AuthTypeDisabled {
+ if l4Policy.AuthMap == nil {
+ l4Policy.AuthMap = make(AuthMap, 1)
+ }
+ authTypes := l4Policy.AuthMap[cs]
+ if authTypes == nil {
+ authTypes = make(AuthTypes, 1)
+ }
+ authTypes[authType] = struct{}{}
+ l4Policy.AuthMap[cs] = authTypes
+ }
+ }
+
+ // Compute Envoy policies when a policy is ready to be used
+ if len(cp.L7Rules.HTTP) > 0 {
+ cp.EnvoyHTTPRules, cp.CanShortCircuit = ctx.GetEnvoyHTTPRules(&cp.L7Rules)
+ }
+ }
+ }
+
+ l4.policy.Store(l4Policy)
+ return features
+}
+
+// createL4IngressFilter creates a filter for L4 policy that applies to the
+// specified endpoints and port/protocol for ingress traffic, with reference
+// to the original rules that the filter is derived from. This filter may be
+// associated with a series of L7 rules via the `rule` parameter.
+//
+// hostWildcardL7 determines if L7 traffic from Host should be
+// wildcarded (in the relevant daemon mode).
+func createL4IngressFilter(policyCtx PolicyContext, fromEndpoints api.EndpointSelectorSlice, auth *api.Authentication, hostWildcardL7 []string, rule api.Ports, port api.PortProtocol,
+ protocol api.L4Proto, ruleLabels labels.LabelArray) (*L4Filter, error) {
+
+ filter, err := createL4Filter(policyCtx, fromEndpoints, auth, rule, port, protocol, ruleLabels, true, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // If the filter would apply proxy redirection for the Host, when we should accept
+ // everything from host, then wildcard Host at L7.
+ if len(hostWildcardL7) > 0 {
+ for cs, l7 := range filter.PerSelectorPolicies {
+ if l7.IsRedirect() && cs.Selects(identity.ReservedIdentityHost) {
+ for _, name := range hostWildcardL7 {
+ selector := api.ReservedEndpointSelectors[name]
+ filter.cacheIdentitySelector(selector, ruleLabels, policyCtx.GetSelectorCache())
+ }
+ }
+ }
+ }
+
+ return filter, nil
+}
+
+// createL4EgressFilter creates a filter for L4 policy that applies to the
+// specified endpoints and port/protocol for egress traffic, with reference
+// to the original rules that the filter is derived from. This filter may be
+// associated with a series of L7 rules via the `rule` parameter.
+func createL4EgressFilter(policyCtx PolicyContext, toEndpoints api.EndpointSelectorSlice, auth *api.Authentication, rule api.Ports, port api.PortProtocol,
+ protocol api.L4Proto, ruleLabels labels.LabelArray, fqdns api.FQDNSelectorSlice) (*L4Filter, error) {
+
+ return createL4Filter(policyCtx, toEndpoints, auth, rule, port, protocol, ruleLabels, false, fqdns)
+}
+
+// redirectType returns the redirectType for this filter
+func (l4 *L4Filter) redirectType() redirectTypes {
+ switch l4.L7Parser {
+ case ParserTypeNone:
+ return redirectTypeNone
+ case ParserTypeDNS:
+ return redirectTypeDNS
+ case ParserTypeHTTP, ParserTypeTLS, ParserTypeCRD:
+ return redirectTypeEnvoy
+ default:
+ // all other (non-empty) values are used for proxylib redirects
+ return redirectTypeProxylib
+ }
+}
+
+// IsRedirect returns true if the L4 filter contains a port redirection
+func (l4 *L4Filter) IsRedirect() bool {
+ return l4.L7Parser != ParserTypeNone
+}
+
+// Marshal returns the `L4Filter` in a JSON string.
+func (l4 *L4Filter) Marshal() string {
+ b, err := json.Marshal(l4)
+ if err != nil {
+ b = []byte("\"L4Filter error: " + err.Error() + "\"")
+ }
+ return string(b)
+}
+
+// String returns the `L4Filter` in a human-readable string.
+func (l4 *L4Filter) String() string {
+ b, err := json.Marshal(l4)
+ if err != nil {
+ return err.Error()
+ }
+ return string(b)
+}
+
+// Note: Only used for policy tracing
+func (l4 *L4Filter) matchesLabels(labels labels.LabelArray) (bool, bool) {
+ if l4.wildcard != nil {
+ perSelectorPolicy := l4.PerSelectorPolicies[l4.wildcard]
+ isDeny := perSelectorPolicy != nil && perSelectorPolicy.IsDeny
+ return true, isDeny
+ } else if len(labels) == 0 {
+ return false, false
+ }
+
+ var selected bool
+ for sel, rule := range l4.PerSelectorPolicies {
+ // slow, but OK for tracing
+ if idSel, ok := sel.(*labelIdentitySelector); ok && idSel.xxxMatches(labels) {
+ isDeny := rule != nil && rule.IsDeny
+ selected = true
+ if isDeny {
+ return true, isDeny
+ }
+ }
+ }
+ return selected, false
+}
+
+// addL4Filter adds 'filterToMerge' into the 'resMap'. Returns an error if it
+// the 'filterToMerge' can't be merged with an existing filter for the same
+// port and proto.
+func addL4Filter(policyCtx PolicyContext,
+ ctx *SearchContext, resMap L4PolicyMap,
+ p api.PortProtocol, proto api.L4Proto,
+ filterToMerge *L4Filter,
+ ruleLabels labels.LabelArray) error {
+
+ key := p.Port + "/" + string(proto)
+ existingFilter, ok := resMap[key]
+ if !ok {
+ resMap[key] = filterToMerge
+ return nil
+ }
+
+ selectorCache := policyCtx.GetSelectorCache()
+ if err := mergePortProto(ctx, existingFilter, filterToMerge, selectorCache); err != nil {
+ filterToMerge.detach(selectorCache)
+ return err
+ }
+
+ // To keep the rule origin tracking correct, merge the rule label arrays for each CachedSelector
+ // we know about. New CachedSelectors are added.
+ for cs, newLabels := range filterToMerge.RuleOrigin {
+ if existingLabels, ok := existingFilter.RuleOrigin[cs]; ok {
+ existingFilter.RuleOrigin[cs] = existingLabels.MergeSorted(newLabels)
+ } else {
+ existingFilter.RuleOrigin[cs] = newLabels
+ }
+ }
+
+ resMap[key] = existingFilter
+ return nil
+}
+
+// L4PolicyMap is a list of L4 filters indexable by protocol/port
+// key format: "port/proto"
+type L4PolicyMap map[string]*L4Filter
+
+type policyFeatures uint8
+
+const (
+ denyRules policyFeatures = 1 << iota
+ authRules
+
+ allFeatures policyFeatures = ^policyFeatures(0)
+)
+
+func (pf *policyFeatures) setFeature(feature policyFeatures) {
+ *pf |= feature
+}
+
+func (pf policyFeatures) contains(feature policyFeatures) bool {
+ return pf&feature != 0
+}
+
+type L4DirectionPolicy struct {
+ PortRules L4PolicyMap
+
+ // features tracks properties of PortRules to skip code when features are not used
+ features policyFeatures
+}
+
+func newL4DirectionPolicy() L4DirectionPolicy {
+ return L4DirectionPolicy{
+ PortRules: L4PolicyMap{},
+ }
+}
+
+// Detach removes the cached selectors held by L4PolicyMap from the
+// selectorCache, allowing the map to be garbage collected when there
+// are no more references to it.
+func (l4 L4DirectionPolicy) Detach(selectorCache *SelectorCache) {
+ l4.PortRules.Detach(selectorCache)
+}
+
+// detach is used directly from tracing and testing functions
+func (l4 L4PolicyMap) Detach(selectorCache *SelectorCache) {
+ for _, f := range l4 {
+ f.detach(selectorCache)
+ }
+}
+
+// Attach makes all the L4Filters to point back to the L4Policy that contains them.
+// This is done before the L4PolicyMap is exposed to concurrent access.
+// Returns the bitmask of all redirect types for this policymap.
+func (l4 *L4DirectionPolicy) attach(ctx PolicyContext, l4Policy *L4Policy) redirectTypes {
+ var redirectTypes redirectTypes
+ var features policyFeatures
+ for _, f := range l4.PortRules {
+ features |= f.attach(ctx, l4Policy)
+ redirectTypes |= f.redirectType()
+ }
+ l4.features = features
+ return redirectTypes
+}
+
+// containsAllL3L4 checks if the L4PolicyMap contains all L4 ports in `ports`.
+// For L4Filters that specify ToEndpoints or FromEndpoints, uses `labels` to
+// determine whether the policy allows L4 communication between the corresponding
+// endpoints.
+// Returns api.Denied in the following conditions:
+// - If a single port is not present in the `L4PolicyMap` and is not allowed
+// by the distilled L3 policy
+// - If a port is present in the `L4PolicyMap`, but it applies ToEndpoints or
+// FromEndpoints constraints that require labels not present in `labels`.
+//
+// Otherwise, returns api.Allowed.
+//
+// Note: Only used for policy tracing
+func (l4 L4PolicyMap) containsAllL3L4(labels labels.LabelArray, ports []*models.Port) api.Decision {
+ if len(l4) == 0 {
+ return api.Allowed
+ }
+
+ // Check L3-only filters first.
+ filter, match := l4[api.PortProtocolAny]
+ if match {
+
+ matches, isDeny := filter.matchesLabels(labels)
+ switch {
+ case matches && isDeny:
+ return api.Denied
+ case matches:
+ return api.Allowed
+ }
+ }
+
+ for _, l4Ctx := range ports {
+ portStr := l4Ctx.Name
+ if !iana.IsSvcName(portStr) {
+ portStr = strconv.FormatUint(uint64(l4Ctx.Port), 10)
+ }
+ lwrProtocol := l4Ctx.Protocol
+ var isUDPDeny, isTCPDeny, isSCTPDeny bool
+ switch lwrProtocol {
+ case "", models.PortProtocolANY:
+ tcpPort := portStr + "/TCP"
+ tcpFilter, tcpmatch := l4[tcpPort]
+ if tcpmatch {
+ tcpmatch, isTCPDeny = tcpFilter.matchesLabels(labels)
+ }
+
+ udpPort := portStr + "/UDP"
+ udpFilter, udpmatch := l4[udpPort]
+ if udpmatch {
+ udpmatch, isUDPDeny = udpFilter.matchesLabels(labels)
+ }
+
+ sctpPort := portStr + "/SCTP"
+ sctpFilter, sctpmatch := l4[sctpPort]
+ if sctpmatch {
+ sctpmatch, isSCTPDeny = sctpFilter.matchesLabels(labels)
+ }
+
+ if (!tcpmatch && !udpmatch && !sctpmatch) || (isTCPDeny && isUDPDeny && isSCTPDeny) {
+ return api.Denied
+ }
+ default:
+ port := portStr + "/" + lwrProtocol
+ filter, match := l4[port]
+ if !match {
+ return api.Denied
+ }
+ matches, isDeny := filter.matchesLabels(labels)
+ if !matches || isDeny {
+ return api.Denied
+ }
+ }
+ }
+ return api.Allowed
+}
+
+type L4Policy struct {
+ Ingress L4DirectionPolicy
+ Egress L4DirectionPolicy
+
+ AuthMap AuthMap
+
+ // Revision is the repository revision used to generate this policy.
+ Revision uint64
+
+ // redirectTypes is a bitmap containing the types of redirect contained by this policy. It
+ // is computed after the policy maps to avoid scanning them repeatedly when using the
+ // L4Policy
+ redirectTypes redirectTypes
+
+ // Endpoint policies using this L4Policy
+ // These are circular references, cleaned up in Detach()
+ // This mutex is taken while Endpoint mutex is held, so Endpoint lock
+ // MUST always be taken before this mutex.
+ mutex lock.RWMutex
+ users map[*EndpointPolicy]struct{}
+}
+
+// NewL4Policy creates a new L4Policy
+func NewL4Policy(revision uint64) L4Policy {
+ return L4Policy{
+ Ingress: newL4DirectionPolicy(),
+ Egress: newL4DirectionPolicy(),
+ Revision: revision,
+ users: make(map[*EndpointPolicy]struct{}),
+ }
+}
+
+// insertUser adds a user to the L4Policy so that incremental
+// updates of the L4Policy may be forwarded to the users of it.
+func (l4 *L4Policy) insertUser(user *EndpointPolicy) {
+ l4.mutex.Lock()
+
+ // 'users' is set to nil when the policy is detached. This
+ // happens to the old policy when it is being replaced with a
+ // new one, or when the last endpoint using this policy is
+ // removed.
+ // In the case of an policy update it is possible that an
+ // endpoint has started regeneration before the policy was
+ // updated, and that the policy was updated before the said
+ // endpoint reached this point. In this case the endpoint's
+ // policy is going to be recomputed soon after and we do
+ // nothing here.
+ if l4.users != nil {
+ l4.users[user] = struct{}{}
+ }
+
+ l4.mutex.Unlock()
+}
+
+// removeUser removes a user that no longer needs incremental updates
+// from the L4Policy.
+func (l4 *L4Policy) removeUser(user *EndpointPolicy) {
+ // 'users' is set to nil when the policy is detached. This
+ // happens to the old policy when it is being replaced with a
+ // new one, or when the last endpoint using this policy is
+ // removed.
+ l4.mutex.Lock()
+ if l4.users != nil {
+ delete(l4.users, user)
+ }
+ l4.mutex.Unlock()
+}
+
+// AccumulateMapChanges distributes the given changes to the registered users.
+//
+// The caller is responsible for making sure the same identity is not
+// present in both 'adds' and 'deletes'.
+func (l4 *L4Policy) AccumulateMapChanges(cs CachedSelector, adds, deletes []identity.NumericIdentity, l4Filter *L4Filter,
+ direction trafficdirection.TrafficDirection, redirect, isDeny bool, hasAuth HasAuthType, authType AuthType) {
+ port := uint16(l4Filter.Port)
+ proto := uint8(l4Filter.U8Proto)
+ derivedFrom := l4Filter.RuleOrigin[cs]
+
+ // Must take a copy of 'users' as GetNamedPort() will lock the Endpoint below and
+ // the Endpoint lock may not be taken while 'l4.mutex' is held.
+ l4.mutex.RLock()
+ users := make(map[*EndpointPolicy]struct{}, len(l4.users))
+ for user := range l4.users {
+ users[user] = struct{}{}
+ }
+ l4.mutex.RUnlock()
+
+ for epPolicy := range users {
+ // Skip if endpoint has no policy maps
+ if !epPolicy.PolicyOwner.HasBPFPolicyMap() {
+ continue
+ }
+ // resolve named port
+ if port == 0 && l4Filter.PortName != "" {
+ port = epPolicy.PolicyOwner.GetNamedPort(direction == trafficdirection.Ingress, l4Filter.PortName, proto)
+ if port == 0 {
+ continue
+ }
+ }
+ epPolicy.policyMapChanges.AccumulateMapChanges(cs, adds, deletes, port, proto, direction, redirect, isDeny, hasAuth, authType, derivedFrom)
+ }
+}
+
+// Detach makes the L4Policy ready for garbage collection, removing
+// circular pointer references.
+// Note that the L4Policy itself is not modified in any way, so that it may still
+// be used concurrently.
+func (l4 *L4Policy) Detach(selectorCache *SelectorCache) {
+ l4.Ingress.Detach(selectorCache)
+ l4.Egress.Detach(selectorCache)
+
+ l4.mutex.Lock()
+ l4.users = nil
+ l4.mutex.Unlock()
+}
+
+// Attach makes all the L4Filters to point back to the L4Policy that contains them.
+// This is done before the L4Policy is exposed to concurrent access.
+func (l4 *L4Policy) Attach(ctx PolicyContext) {
+ ingressRedirects := l4.Ingress.attach(ctx, l4)
+ egressRedirects := l4.Egress.attach(ctx, l4)
+ l4.redirectTypes = ingressRedirects | egressRedirects
+}
+
+// IngressCoversContext checks if the receiver's ingress L4Policy contains
+// all `dPorts` and `labels`.
+//
+// Note: Only used for policy tracing
+func (l4 *L4PolicyMap) IngressCoversContext(ctx *SearchContext) api.Decision {
+ return l4.containsAllL3L4(ctx.From, ctx.DPorts)
+}
+
+// EgressCoversContext checks if the receiver's egress L4Policy contains
+// all `dPorts` and `labels`.
+//
+// Note: Only used for policy tracing
+func (l4 *L4PolicyMap) EgressCoversContext(ctx *SearchContext) api.Decision {
+ return l4.containsAllL3L4(ctx.To, ctx.DPorts)
+}
+
+// HasRedirect returns true if the L4 policy contains at least one port redirection
+func (l4 *L4Policy) HasRedirect() bool {
+ return l4 != nil && l4.redirectTypes != redirectTypeNone
+}
+
+// HasEnvoyRedirect returns true if the L4 policy contains at least one port redirection to Envoy
+func (l4 *L4Policy) HasEnvoyRedirect() bool {
+ return l4 != nil && l4.redirectTypes&redirectTypeEnvoy == redirectTypeEnvoy
+}
+
+// HasProxylibRedirect returns true if the L4 policy contains at least one port redirection to Proxylib
+func (l4 *L4Policy) HasProxylibRedirect() bool {
+ return l4 != nil && l4.redirectTypes&redirectTypeProxylib == redirectTypeProxylib
+}
+
+func (l4 *L4Policy) GetModel() *models.L4Policy {
+ if l4 == nil {
+ return nil
+ }
+
+ ingress := []*models.PolicyRule{}
+ for _, v := range l4.Ingress.PortRules {
+ rulesBySelector := map[string][][]string{}
+ derivedFrom := labels.LabelArrayList{}
+ for sel, rules := range v.RuleOrigin {
+ derivedFrom.MergeSorted(rules)
+ rulesBySelector[sel.String()] = rules.GetModel()
+ }
+ ingress = append(ingress, &models.PolicyRule{
+ Rule: v.Marshal(),
+ DerivedFromRules: derivedFrom.GetModel(),
+ RulesBySelector: rulesBySelector,
+ })
+ }
+
+ egress := []*models.PolicyRule{}
+ for _, v := range l4.Egress.PortRules {
+ derivedFrom := labels.LabelArrayList{}
+ for _, rules := range v.RuleOrigin {
+ derivedFrom.MergeSorted(rules)
+ }
+ egress = append(egress, &models.PolicyRule{
+ Rule: v.Marshal(),
+ DerivedFromRules: derivedFrom.GetModel(),
+ })
+ }
+
+ return &models.L4Policy{
+ Ingress: ingress,
+ Egress: egress,
+ }
+}
+
+// ProxyPolicy is any type which encodes state needed to redirect to an L7
+// proxy.
+type ProxyPolicy interface {
+ CopyL7RulesPerEndpoint() L7DataMap
+ GetL7Parser() L7ParserType
+ GetIngress() bool
+ GetPort() uint16
+ GetListener() string
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/mapstate.go b/vendor/github.com/cilium/cilium/pkg/policy/mapstate.go
new file mode 100644
index 000000000..94214da79
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/mapstate.go
@@ -0,0 +1,1461 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package policy
+
+import (
+ "fmt"
+ "net"
+ "strconv"
+
+ "github.com/sirupsen/logrus"
+ "golang.org/x/exp/maps"
+ "golang.org/x/exp/slices"
+
+ "github.com/cilium/cilium/pkg/identity"
+ "github.com/cilium/cilium/pkg/ip"
+ "github.com/cilium/cilium/pkg/labels"
+ "github.com/cilium/cilium/pkg/lock"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+ "github.com/cilium/cilium/pkg/option"
+ "github.com/cilium/cilium/pkg/policy/trafficdirection"
+)
+
+var (
+ // localHostKey represents an ingress L3 allow from the local host.
+ localHostKey = Key{
+ Identity: identity.ReservedIdentityHost.Uint32(),
+ TrafficDirection: trafficdirection.Ingress.Uint8(),
+ }
+ // localRemoteNodeKey represents an ingress L3 allow from remote nodes.
+ localRemoteNodeKey = Key{
+ Identity: identity.ReservedIdentityRemoteNode.Uint32(),
+ TrafficDirection: trafficdirection.Ingress.Uint8(),
+ }
+ // allKey represents a key for unknown traffic, i.e., all traffic.
+ allKey = Key{
+ Identity: identity.IdentityUnknown.Uint32(),
+ }
+)
+
+const (
+ LabelKeyPolicyDerivedFrom = "io.cilium.policy.derived-from"
+ LabelAllowLocalHostIngress = "allow-localhost-ingress"
+ LabelAllowRemoteHostIngress = "allow-remotehost-ingress"
+ LabelAllowAnyIngress = "allow-any-ingress"
+ LabelAllowAnyEgress = "allow-any-egress"
+ LabelVisibilityAnnotation = "visibility-annotation"
+)
+
+// MapState is a map interface for policy maps
+type MapState interface {
+ Get(Key) (MapStateEntry, bool)
+ Insert(Key, MapStateEntry)
+ Delete(Key)
+ InsertIfNotExists(Key, MapStateEntry) bool
+ // ForEach allows iteration over the MapStateEntries. It returns true iff
+ // the iteration was not stopped early by the callback.
+ ForEach(func(Key, MapStateEntry) (cont bool)) (complete bool)
+ // ForEachAllow behaves like ForEach, but only iterates MapStateEntries which are not denies.
+ ForEachAllow(func(Key, MapStateEntry) (cont bool)) (complete bool)
+ // ForEachDeny behaves like ForEach, but only iterates MapStateEntries which are denies.
+ ForEachDeny(func(Key, MapStateEntry) (cont bool)) (complete bool)
+ GetIdentities(*logrus.Logger) ([]int64, []int64)
+ GetDenyIdentities(*logrus.Logger) ([]int64, []int64)
+ RevertChanges(ChangeState)
+ AddVisibilityKeys(PolicyOwner, uint16, *VisibilityMetadata, ChangeState)
+ Len() int
+ Equals(MapState) bool
+
+ allowAllIdentities(ingress, egress bool)
+ determineAllowLocalhostIngress()
+ deniesL4(policyOwner PolicyOwner, l4 *L4Filter) bool
+ denyPreferredInsertWithChanges(newKey Key, newEntry MapStateEntry, identities Identities, features policyFeatures, changes ChangeState)
+ deleteKeyWithChanges(key Key, owner MapStateOwner, changes ChangeState)
+}
+
+// mapState is a state of a policy map.
+type mapState struct {
+ allows map[Key]MapStateEntry
+ denies map[Key]MapStateEntry
+}
+
+type Identities interface {
+ GetNetsLocked(identity.NumericIdentity) []*net.IPNet
+}
+
+// Key is the userspace representation of a policy key in BPF. It is
+// intentionally duplicated from pkg/maps/policymap to avoid pulling in the
+// BPF dependency to this package.
+type Key struct {
+ // Identity is the numeric identity to / from which traffic is allowed.
+ Identity uint32
+ // DestPort is the port at L4 to / from which traffic is allowed, in
+ // host-byte order.
+ DestPort uint16
+ // NextHdr is the protocol which is allowed.
+ Nexthdr uint8
+ // TrafficDirection indicates in which direction Identity is allowed
+ // communication (egress or ingress).
+ TrafficDirection uint8
+}
+
+// String returns a string representation of the Key
+func (k Key) String() string {
+ return "Identity=" + strconv.FormatUint(uint64(k.Identity), 10) +
+ ",DestPort=" + strconv.FormatUint(uint64(k.DestPort), 10) +
+ ",Nexthdr=" + strconv.FormatUint(uint64(k.Nexthdr), 10) +
+ ",TrafficDirection=" + strconv.FormatUint(uint64(k.TrafficDirection), 10)
+}
+
+// IsIngress returns true if the key refers to an ingress policy key
+func (k Key) IsIngress() bool {
+ return k.TrafficDirection == trafficdirection.Ingress.Uint8()
+}
+
+// IsEgress returns true if the key refers to an egress policy key
+func (k Key) IsEgress() bool {
+ return k.TrafficDirection == trafficdirection.Egress.Uint8()
+}
+
+// PortProtoIsBroader returns true if the receiver Key has broader
+// port-protocol than the argument Key. That is a port-protocol
+// that covers the argument Key's port-protocol and is larger.
+// An equal port-protocol will return false.
+func (k Key) PortProtoIsBroader(c Key) bool {
+ return k.DestPort == 0 && c.DestPort != 0 ||
+ k.Nexthdr == 0 && c.Nexthdr != 0
+}
+
+// PortProtoIsEqual returns true if the port-protocols of the
+// two keys are exactly equal.
+func (k Key) PortProtoIsEqual(c Key) bool {
+ return k.DestPort == c.DestPort && k.Nexthdr == c.Nexthdr
+}
+
+type Keys map[Key]struct{}
+
+type MapStateOwner interface{}
+
+// MapStateEntry is the configuration associated with a Key in a
+// MapState. This is a minimized version of policymap.PolicyEntry.
+type MapStateEntry struct {
+ // The proxy port, in host byte order.
+ // If 0 (default), there is no proxy redirection for the corresponding
+ // Key. Any other value signifies proxy redirection.
+ ProxyPort uint16
+
+ // IsDeny is true when the policy should be denied.
+ IsDeny bool
+
+ // hasAuthType is 'DefaultAuthType' when policy has no explicit AuthType set. In this case the
+ // value of AuthType is derived from more generic entries covering this entry.
+ hasAuthType HasAuthType
+
+ // AuthType is non-zero when authentication is required for the traffic to be allowed.
+ AuthType AuthType
+
+ // DerivedFromRules tracks the policy rules this entry derives from
+ // In sorted order.
+ DerivedFromRules labels.LabelArrayList
+
+ // Owners collects the keys in the map and selectors in the policy that require this key to be present.
+ // TODO: keep track which selector needed the entry to be deny, redirect, or just allow.
+ owners map[MapStateOwner]struct{}
+
+ // dependents contains the keys for entries created based on this entry. These entries
+ // will be deleted once all of the owners are deleted.
+ dependents Keys
+}
+
+// NewMapStateEntry creates a map state entry. If redirect is true, the
+// caller is expected to replace the ProxyPort field before it is added to
+// the actual BPF map.
+// 'cs' is used to keep track of which policy selectors need this entry. If it is 'nil' this entry
+// will become sticky and cannot be completely removed via incremental updates. Even in this case
+// the entry may be overridden or removed by a deny entry.
+func NewMapStateEntry(cs MapStateOwner, derivedFrom labels.LabelArrayList, redirect, deny bool, hasAuth HasAuthType, authType AuthType) MapStateEntry {
+ var proxyPort uint16
+ if redirect {
+ // Any non-zero value will do, as the callers replace this with the
+ // actual proxy listening port number before the entry is added to the
+ // actual bpf map.
+ proxyPort = 1
+ }
+
+ return MapStateEntry{
+ ProxyPort: proxyPort,
+ DerivedFromRules: derivedFrom,
+ IsDeny: deny,
+ hasAuthType: hasAuth,
+ AuthType: authType,
+ owners: map[MapStateOwner]struct{}{cs: {}},
+ }
+}
+
+// AddDependent adds 'key' to the set of dependent keys.
+func (e *MapStateEntry) AddDependent(key Key) {
+ if e.dependents == nil {
+ e.dependents = make(Keys, 1)
+ }
+ e.dependents[key] = struct{}{}
+}
+
+// RemoveDependent removes 'key' from the set of dependent keys.
+func (e *MapStateEntry) RemoveDependent(key Key) {
+ delete(e.dependents, key)
+ // Nil the map when empty. This is mainly to make unit testing easier.
+ if len(e.dependents) == 0 {
+ e.dependents = nil
+ }
+}
+
+// HasDependent returns true if the 'key' is contained
+// within the set of dependent keys
+func (e *MapStateEntry) HasDependent(key Key) bool {
+ if e.dependents == nil {
+ return false
+ }
+ _, ok := e.dependents[key]
+ return ok
+}
+
+var worldNets = map[identity.NumericIdentity][]*net.IPNet{
+ identity.ReservedIdentityWorld: {
+ {IP: net.IPv4zero, Mask: net.CIDRMask(0, net.IPv4len*8)},
+ {IP: net.IPv6zero, Mask: net.CIDRMask(0, net.IPv6len*8)},
+ },
+ identity.ReservedIdentityWorldIPv4: {
+ {IP: net.IPv4zero, Mask: net.CIDRMask(0, net.IPv4len*8)},
+ },
+ identity.ReservedIdentityWorldIPv6: {
+ {IP: net.IPv6zero, Mask: net.CIDRMask(0, net.IPv6len*8)},
+ },
+}
+
+// getNets returns the most specific CIDR for an identity. For the "World" identity
+// it returns both IPv4 and IPv6.
+func getNets(identities Identities, ident uint32) []*net.IPNet {
+ // World identities are handled explicitly for two reasons:
+ // 1. 'identities' may be nil, but world identities are still expected to be considered
+ // 2. SelectorCache is not informed of reserved/world identities in all test cases
+ id := identity.NumericIdentity(ident)
+ if id <= identity.ReservedIdentityWorldIPv6 {
+ return worldNets[id]
+ }
+ // CIDR identities have a local scope, so we can skip the rest if id is not of local scope.
+ if !id.HasLocalScope() || identities == nil {
+ return nil
+ }
+ return identities.GetNetsLocked(id)
+}
+
+// NewMapState creates a new MapState interface
+func NewMapState(initMap map[Key]MapStateEntry) MapState {
+ return newMapState(initMap)
+}
+
+func newMapState(initMap map[Key]MapStateEntry) *mapState {
+ m := &mapState{
+ allows: make(map[Key]MapStateEntry),
+ denies: make(map[Key]MapStateEntry),
+ }
+ for k, v := range initMap {
+ m.Insert(k, v)
+ }
+ return m
+}
+
+// Get the MapStateEntry that matches the Key.
+func (ms *mapState) Get(k Key) (MapStateEntry, bool) {
+ v, ok := ms.denies[k]
+ if ok {
+ return v, ok
+ }
+ v, ok = ms.allows[k]
+ return v, ok
+}
+
+// Insert the Key and matching MapStateEntry into the
+// MapState
+func (ms *mapState) Insert(k Key, v MapStateEntry) {
+ if v.IsDeny {
+ delete(ms.allows, k)
+ ms.denies[k] = v
+ } else {
+ delete(ms.denies, k)
+ ms.allows[k] = v
+ }
+}
+
+// Delete removes the Key and related MapStateEntry.
+func (ms *mapState) Delete(k Key) {
+ delete(ms.allows, k)
+ delete(ms.denies, k)
+}
+
+// ForEach iterates over every Key MapStateEntry and stops when the function
+// argument returns false. It returns false iff the iteration was cut short.
+func (ms *mapState) ForEach(f func(Key, MapStateEntry) (cont bool)) (complete bool) {
+ if complete := ms.ForEachAllow(f); !complete {
+ return complete
+ }
+
+ return ms.ForEachDeny(f)
+}
+
+// ForEachAllow iterates over every Key MapStateEntry that isn't a deny and
+// stops when the function argument returns false
+func (ms *mapState) ForEachAllow(f func(Key, MapStateEntry) (cont bool)) (complete bool) {
+ for k, v := range ms.allows {
+ if !f(k, v) {
+ return false
+ }
+ }
+ return true
+}
+
+// ForEachDeny iterates over every Key MapStateEntry that is a deny and
+// stops when the function argument returns false
+func (ms *mapState) ForEachDeny(f func(Key, MapStateEntry) (cont bool)) (complete bool) {
+ for k, v := range ms.denies {
+ if !f(k, v) {
+ return false
+ }
+ }
+ return true
+}
+
+// Len returns the length of the map
+func (ms *mapState) Len() int {
+ return len(ms.allows) + len(ms.denies)
+}
+
+// Equals determines if this MapState is equal to the
+// argument MapState
+func (msA *mapState) Equals(msB MapState) bool {
+ if msA.Len() != msB.Len() {
+ return false
+ }
+
+ return msB.ForEach(func(kA Key, vA MapStateEntry) bool {
+ if vB, ok := msB.Get(kA); ok {
+ if !(&vB).DatapathEqual(&vA) {
+ return false
+ }
+ } else {
+ return false
+ }
+
+ return true
+ })
+}
+
+// AddDependent adds 'key' to the set of dependent keys.
+func (ms *mapState) AddDependent(owner Key, dependent Key, changes ChangeState) {
+ if e, exists := ms.allows[owner]; exists {
+ ms.addDependentOnEntry(owner, e, dependent, changes)
+ } else if e, exists := ms.denies[owner]; exists {
+ ms.addDependentOnEntry(owner, e, dependent, changes)
+ }
+}
+
+// addDependentOnEntry adds 'dependent' to the set of dependent keys of 'e'.
+func (ms *mapState) addDependentOnEntry(owner Key, e MapStateEntry, dependent Key, changes ChangeState) {
+ if _, exists := e.dependents[dependent]; !exists {
+ if changes.Old != nil {
+ changes.Old.Insert(owner, e)
+ }
+ e.AddDependent(dependent)
+ if e.IsDeny {
+ delete(ms.allows, owner)
+ ms.denies[owner] = e
+ } else {
+ delete(ms.denies, owner)
+ ms.allows[owner] = e
+ }
+ }
+}
+
+// RemoveDependent removes 'key' from the list of dependent keys.
+// This is called when a dependent entry is being deleted.
+// If 'old' is not nil, then old value is added there before any modifications.
+func (ms *mapState) RemoveDependent(owner Key, dependent Key, old MapState) {
+ if e, exists := ms.allows[owner]; exists {
+ old.InsertIfNotExists(owner, e)
+ e.RemoveDependent(dependent)
+ delete(ms.denies, owner)
+ ms.allows[owner] = e
+ return
+ }
+
+ if e, exists := ms.denies[owner]; exists {
+ old.InsertIfNotExists(owner, e)
+ e.RemoveDependent(dependent)
+ delete(ms.allows, owner)
+ ms.denies[owner] = e
+ }
+}
+
+// Merge adds owners, dependents, and DerivedFromRules from a new 'entry' to an existing
+// entry 'e'. 'entry' is not modified.
+// IsDeny, ProxyPort, and AuthType are merged by giving precedence to deny over non-deny, proxy
+// redirection over no proxy redirection, and explicit auth type over default auth type.
+func (e *MapStateEntry) Merge(entry *MapStateEntry) {
+ // Deny is sticky
+ if !e.IsDeny {
+ e.IsDeny = entry.IsDeny
+ }
+
+ // Deny entries have no proxy redirection nor auth requirement
+ if e.IsDeny {
+ e.ProxyPort = 0
+ e.hasAuthType = DefaultAuthType
+ e.AuthType = AuthTypeDisabled
+ } else {
+ // Proxy port takes precedence, but may be updated
+ if entry.ProxyPort != 0 {
+ e.ProxyPort = entry.ProxyPort
+ }
+
+ // Explicit auth takes precedence over defaulted one.
+ if entry.hasAuthType == ExplicitAuthType {
+ if e.hasAuthType == ExplicitAuthType {
+ // Numerically higher AuthType takes precedence when both are explicitly defined
+ if entry.AuthType > e.AuthType {
+ e.AuthType = entry.AuthType
+ }
+ } else {
+ e.hasAuthType = ExplicitAuthType
+ e.AuthType = entry.AuthType
+ }
+ } else if e.hasAuthType == DefaultAuthType {
+ e.AuthType = entry.AuthType // new default takes precedence
+ }
+ }
+
+ if e.owners == nil && len(entry.owners) > 0 {
+ e.owners = make(map[MapStateOwner]struct{}, len(entry.owners))
+ }
+ for k, v := range entry.owners {
+ e.owners[k] = v
+ }
+
+ // merge dependents
+ for k := range entry.dependents {
+ e.AddDependent(k)
+ }
+
+ // merge DerivedFromRules
+ if len(entry.DerivedFromRules) > 0 {
+ e.DerivedFromRules.MergeSorted(entry.DerivedFromRules)
+ }
+}
+
+// IsRedirectEntry returns true if e contains a redirect
+func (e *MapStateEntry) IsRedirectEntry() bool {
+ return e.ProxyPort != 0
+}
+
+// DatapathEqual returns true if two entries are equal in the datapath's PoV,
+// i.e., IsDeny, ProxyPort and AuthType are the same for both entries.
+func (e *MapStateEntry) DatapathEqual(o *MapStateEntry) bool {
+ if e == nil || o == nil {
+ return e == o
+ }
+
+ return e.IsDeny == o.IsDeny && e.ProxyPort == o.ProxyPort && e.AuthType == o.AuthType
+}
+
+// DeepEqual is a manually generated deepequal function, deeply comparing the
+// receiver with other. in must be non-nil.
+// Defined manually due to deepequal-gen not supporting interface types.
+// 'cachedNets' member is ignored in comparison, as it is a cached value and
+// makes no functional difference.
+func (e *MapStateEntry) DeepEqual(o *MapStateEntry) bool {
+ if !e.DatapathEqual(o) {
+ return false
+ }
+
+ if !e.DerivedFromRules.DeepEqual(&o.DerivedFromRules) {
+ return false
+ }
+
+ if len(e.owners) != len(o.owners) {
+ return false
+ }
+ for k := range o.owners {
+ if _, exists := e.owners[k]; !exists {
+ return false
+ }
+ }
+
+ if len(e.dependents) != len(o.dependents) {
+ return false
+ }
+ for k := range o.dependents {
+ if _, exists := e.dependents[k]; !exists {
+ return false
+ }
+ }
+
+ // ignoring cachedNets
+
+ return true
+}
+
+// String returns a string representation of the MapStateEntry
+func (e MapStateEntry) String() string {
+ return "ProxyPort=" + strconv.FormatUint(uint64(e.ProxyPort), 10) +
+ ",IsDeny=" + strconv.FormatBool(e.IsDeny) +
+ ",AuthType=" + e.AuthType.String() +
+ ",DerivedFromRules=" + fmt.Sprintf("%v", e.DerivedFromRules)
+}
+
+// denyPreferredInsert inserts a key and entry into the map by given preference
+// to deny entries, and L3-only deny entries over L3-L4 allows.
+// This form may be used when a full policy is computed and we are not yet interested
+// in accumulating incremental changes.
+// Caller may insert the same MapStateEntry multiple times for different Keys, but all from the same
+// owner.
+func (ms *mapState) denyPreferredInsert(newKey Key, newEntry MapStateEntry, identities Identities, features policyFeatures) {
+ // Enforce nil values from NewMapStateEntry
+ newEntry.dependents = nil
+
+ ms.denyPreferredInsertWithChanges(newKey, newEntry, identities, features, ChangeState{})
+}
+
+// addKeyWithChanges adds a 'key' with value 'entry' to 'keys' keeping track of incremental changes in 'adds' and 'deletes', and any changed or removed old values in 'old', if not nil.
+func (ms *mapState) addKeyWithChanges(key Key, entry MapStateEntry, changes ChangeState) {
+ // Keep all owners that need this entry so that it is deleted only if all the owners delete their contribution
+ oldEntry, exists := ms.Get(key)
+ if exists {
+ // Deny entry can only be overridden by another deny entry
+ if oldEntry.IsDeny && !entry.IsDeny {
+ return
+ }
+
+ if entry.DeepEqual(&oldEntry) {
+ return // nothing to do
+ }
+
+ // Save old value before any changes, if desired
+ if changes.Old != nil {
+ changes.Old.InsertIfNotExists(key, oldEntry)
+ }
+
+ oldEntry.Merge(&entry)
+ ms.Insert(key, oldEntry)
+ } else {
+ // Newly inserted entries must have their own containers, so that they
+ // remain separate when new owners/dependents are added to existing entries
+ entry.DerivedFromRules = slices.Clone(entry.DerivedFromRules)
+ entry.owners = maps.Clone(entry.owners)
+ entry.dependents = maps.Clone(entry.dependents)
+ ms.Insert(key, entry)
+ }
+
+ // Record an incremental Add if desired and entry is new or changed
+ if changes.Adds != nil && (!exists || !oldEntry.DatapathEqual(&entry)) {
+ changes.Adds[key] = struct{}{}
+ // Key add overrides any previous delete of the same key
+ if changes.Deletes != nil {
+ delete(changes.Deletes, key)
+ }
+ }
+}
+
+// deleteKeyWithChanges deletes a 'key' from 'keys' keeping track of incremental changes in 'adds' and 'deletes'.
+// The key is unconditionally deleted if 'owner' is nil, otherwise only the contribution of this 'owner' is removed.
+func (ms *mapState) deleteKeyWithChanges(key Key, owner MapStateOwner, changes ChangeState) {
+ if entry, exists := ms.Get(key); exists {
+ // Save old value before any changes, if desired
+ if changes.Old == nil {
+ changes.Old = newMapState(nil)
+ }
+ oldAdded := changes.Old.InsertIfNotExists(key, entry)
+
+ if owner != nil {
+ // remove the contribution of the given selector only
+ if _, exists = entry.owners[owner]; exists {
+ // Remove the contribution of this selector from the entry
+ delete(entry.owners, owner)
+ if ownerKey, ok := owner.(Key); ok {
+ ms.RemoveDependent(ownerKey, key, changes.Old)
+ }
+ // key is not deleted if other owners still need it
+ if len(entry.owners) > 0 {
+ return
+ }
+ } else {
+ // 'owner' was not found, do not change anything
+ if oldAdded {
+ changes.Old.Delete(key)
+ }
+ return
+ }
+ }
+
+ // Remove this key from all owners' dependents maps if no owner was given.
+ // Owner is nil when deleting more specific entries (e.g., L3/L4) when
+ // adding deny entries that cover them (e.g., L3-deny).
+ if owner == nil {
+ for owner := range entry.owners {
+ if owner != nil {
+ if ownerKey, ok := owner.(Key); ok {
+ ms.RemoveDependent(ownerKey, key, changes.Old)
+ }
+ }
+ }
+ }
+
+ // Check if dependent entries need to be deleted as well
+ for k := range entry.dependents {
+ ms.deleteKeyWithChanges(k, key, changes)
+ }
+ if changes.Deletes != nil {
+ changes.Deletes[key] = struct{}{}
+ // Remove a potential previously added key
+ if changes.Adds != nil {
+ delete(changes.Adds, key)
+ }
+ }
+
+ delete(ms.allows, key)
+ delete(ms.denies, key)
+ }
+}
+
+// identityIsSupersetOf compares two entries and keys to see if the primary identity contains
+// the compared identity. This means that either that primary identity is 0 (i.e. it is a superset
+// of every other identity), or one of the subnets of the primary identity fully contains or is
+// equal to one of the subnets in the compared identity (note: this covers cases like "reserved:world").
+func identityIsSupersetOf(primaryIdentity, compareIdentity uint32, identities Identities) bool {
+ // If the identities are equal then neither is a superset (for the purposes of our business logic).
+ if primaryIdentity == compareIdentity {
+ return false
+ }
+
+ // Consider an identity that selects a broader CIDR as a superset of
+ // an identity that selects a narrower CIDR. For instance, an identity
+ // corresponding to 192.0.0.0/16 is a superset of the identity that
+ // corresponds to 192.0.2.3/32.
+ //
+ // The reasons we need to do this are surprisingly complex, taking into
+ // consideration design decisions around the handling of ToFQDNs policy
+ // and how L4PolicyMap/L4Filter structures cache the policies with
+ // respect to specific CIDRs. More specifically:
+ // - At the time of initial L4Filter creation, it is not known which
+ // specific CIDRs (or corresponding identities) are selected by a
+ // toFQDNs rule in the policy engine.
+ // - It is possible to have a CIDR deny rule that should deny peers
+ // that are allowed by a ToFQDNs statement. The precedence rules in
+ // the API for such policy conflicts define that the deny should take
+ // precedence.
+ // - Consider a case where there is a deny rule for 192.0.0.0/16 with
+ // an allow rule for cilium.io, and one of the IP addresses for
+ // cilium.io is 192.0.2.3.
+ // - If the IP for cilium.io was known at initial policy computation
+ // time, then we would calculate the MapState from the L4Filters and
+ // immediately determine that there is a conflict between the
+ // L4Filter that denies 192.0.0.0/16 vs. the allow for 192.0.2.3.
+ // From this we could immediately discard the "allow to 192.0.2.3"
+ // policymap entry during policy calculation. This would satisfy the
+ // API constraint that deny rules take precedence over allow rules.
+ // However, this is not the case for ToFQDNs -- the IPs are not known
+ // until DNS resolution time by the selected application / endpoint.
+ // - In order to make ToFQDNs policy implementation efficient, it uses
+ // a shorter incremental policy computation path that attempts to
+ // directly implement the ToFQDNs allow into a MapState entry without
+ // reaching back up to the L4Filter layer to iterate all selectors
+ // to determine traffic reachability for this newly learned IP.
+ // - As such, when the new ToFQDNs allow for the 192.0.2.3 IP address
+ // is implemented, we must iterate back through all existing MapState
+ // entries to determine whether any of the other map entries already
+ // denies this traffic by virtue of the IP prefix being a superset of
+ // this new allow. This allows us to ensure that the broader CIDR
+ // deny semantics are correctly applied when there is a combination
+ // of CIDR deny rules and ToFQDNs allow rules.
+ //
+ // An alternative to this approach might be to change the ToFQDNs
+ // policy calculation layer to reference back to the L4Filter layer,
+ // and perhaps introduce additional CIDR caching somewhere there so
+ // that this policy computation can be efficient while handling DNS
+ // responses. As of the writing of this message, there is no
+ // active proposal to implement this approach. As a result, any time
+ // there is an incremental policy update for a new map entry, we must
+ // iterate through all entries in the map and re-evaluate superset
+ // relationships for deny entries to ensure that policy precedence is
+ // correctly implemented between the new and old entries, taking into
+ // account whether the identities may represent CIDRs that have a
+ // superset relationship.
+ return primaryIdentity == 0 && compareIdentity != 0 ||
+ ip.NetsContainsAny(getNets(identities, primaryIdentity),
+ getNets(identities, compareIdentity))
+}
+
+// protocolsMatch checks to see if two given keys match on protocol.
+// This means that either one of them covers all protocols or they
+// are equal.
+func protocolsMatch(a, b Key) bool {
+ return a.Nexthdr == 0 || b.Nexthdr == 0 || a.Nexthdr == b.Nexthdr
+}
+
+// RevertChanges undoes changes to 'keys' as indicated by 'changes.adds' and 'changes.old' collected via
+// denyPreferredInsertWithChanges().
+func (ms *mapState) RevertChanges(changes ChangeState) {
+ for k := range changes.Adds {
+ delete(ms.allows, k)
+ delete(ms.denies, k)
+ }
+ // 'old' contains all the original values of both modified and deleted entries
+ changes.Old.ForEach(func(k Key, v MapStateEntry) bool {
+ ms.Insert(k, v)
+ return true
+ })
+}
+
+// denyPreferredInsertWithChanges contains the most important business logic for policy insertions. It inserts
+// a key and entry into the map by giving preference to deny entries, and L3-only deny entries over L3-L4 allows.
+// Incremental changes performed are recorded in 'adds' and 'deletes', if not nil.
+// See https://docs.google.com/spreadsheets/d/1WANIoZGB48nryylQjjOw6lKjI80eVgPShrdMTMalLEw#gid=2109052536 for details
+func (ms *mapState) denyPreferredInsertWithChanges(newKey Key, newEntry MapStateEntry, identities Identities, features policyFeatures, changes ChangeState) {
+ // Skip deny rules processing if the policy in this direction has no deny rules
+ if !features.contains(denyRules) {
+ ms.authPreferredInsert(newKey, newEntry, features, changes)
+ return
+ }
+
+ allCpy := allKey
+ allCpy.TrafficDirection = newKey.TrafficDirection
+ // If we have a deny "all" we don't accept any kind of map entry.
+ if _, ok := ms.denies[allCpy]; ok {
+ return
+ }
+ if newEntry.IsDeny {
+ ms.ForEachAllow(func(k Key, v MapStateEntry) bool {
+ // Protocols and traffic directions that don't match ensure that the policies
+ // do not interact in anyway.
+ if newKey.TrafficDirection != k.TrafficDirection || !protocolsMatch(newKey, k) {
+ return true
+ }
+
+ if identityIsSupersetOf(k.Identity, newKey.Identity, identities) {
+ if newKey.PortProtoIsBroader(k) {
+ // If this iterated-allow-entry is a superset of the new-entry
+ // and it has a more specific port-protocol than the new-entry
+ // then an additional copy of the new-entry with the more
+ // specific port-protocol of the iterated-allow-entry must be inserted.
+ newKeyCpy := newKey
+ newKeyCpy.DestPort = k.DestPort
+ newKeyCpy.Nexthdr = k.Nexthdr
+ l3l4DenyEntry := NewMapStateEntry(newKey, newEntry.DerivedFromRules, false, true, DefaultAuthType, AuthTypeDisabled)
+ ms.addKeyWithChanges(newKeyCpy, l3l4DenyEntry, changes)
+ // L3-only entries can be deleted incrementally so we need to track their
+ // effects on other entries so that those effects can be reverted when the
+ // identity is removed.
+ newEntry.AddDependent(newKeyCpy)
+ }
+ } else if (newKey.Identity == k.Identity ||
+ identityIsSupersetOf(newKey.Identity, k.Identity, identities)) &&
+ (newKey.PortProtoIsBroader(k) || newKey.PortProtoIsEqual(k)) {
+ // If the new-entry is a superset (or equal) of the iterated-allow-entry and
+ // the new-entry has a broader (or equal) port-protocol then we
+ // should delete the iterated-allow-entry
+ ms.deleteKeyWithChanges(k, nil, changes)
+ }
+ return true
+ })
+
+ bailed := false
+ ms.ForEachDeny(func(k Key, v MapStateEntry) bool {
+ // Protocols and traffic directions that don't match ensure that the policies
+ // do not interact in anyway.
+ if newKey.TrafficDirection != k.TrafficDirection || !protocolsMatch(newKey, k) {
+ return true
+ }
+
+ if (newKey.Identity == k.Identity ||
+ identityIsSupersetOf(k.Identity, newKey.Identity, identities)) &&
+ k.DestPort == 0 && k.Nexthdr == 0 &&
+ !v.HasDependent(newKey) {
+ // If this iterated-deny-entry is a superset (or equal) of the new-entry and
+ // the iterated-deny-entry is an L3-only policy then we
+ // should not insert the new entry (as long as it is not one
+ // of the special L4-only denies we created to cover the special
+ // case of a superset-allow with a more specific port-protocol).
+ //
+ // NOTE: This condition could be broader to reject more deny entries,
+ // but there *may* be performance tradeoffs.
+ bailed = true
+ return false
+ } else if (newKey.Identity == k.Identity ||
+ identityIsSupersetOf(newKey.Identity, k.Identity, identities)) &&
+ newKey.DestPort == 0 && newKey.Nexthdr == 0 &&
+ !newEntry.HasDependent(k) {
+ // If this iterated-deny-entry is a subset (or equal) of the new-entry and
+ // the new-entry is an L3-only policy then we
+ // should delete the iterated-deny-entry (as long as it is not one
+ // of the special L4-only denies we created to cover the special
+ // case of a superset-allow with a more specific port-protocol).
+ //
+ // NOTE: This condition could be broader to reject more deny entries,
+ // but there *may* be performance tradeoffs.
+ ms.deleteKeyWithChanges(k, nil, changes)
+ }
+ return true
+ })
+
+ if !bailed {
+ ms.addKeyWithChanges(newKey, newEntry, changes)
+ }
+ } else {
+ // NOTE: We do not delete redundant allow entries.
+ bailed := false
+ ms.ForEachDeny(func(k Key, v MapStateEntry) bool {
+ // Protocols and traffic directions that don't match ensure that the policies
+ // do not interact in anyway.
+ if newKey.TrafficDirection != k.TrafficDirection || !protocolsMatch(newKey, k) {
+ return true
+ }
+ if identityIsSupersetOf(newKey.Identity, k.Identity, identities) {
+ if k.PortProtoIsBroader(newKey) {
+ // If the new-entry is *only* superset of the iterated-deny-entry
+ // and the new-entry has a more specific port-protocol than the
+ // iterated-deny-entry then an additional copy of the iterated-deny-entry
+ // with the more specific port-protocol of the new-entry must
+ // be added.
+ denyKeyCpy := k
+ denyKeyCpy.DestPort = newKey.DestPort
+ denyKeyCpy.Nexthdr = newKey.Nexthdr
+ l3l4DenyEntry := NewMapStateEntry(k, v.DerivedFromRules, false, true, DefaultAuthType, AuthTypeDisabled)
+ ms.addKeyWithChanges(denyKeyCpy, l3l4DenyEntry, changes)
+ // L3-only entries can be deleted incrementally so we need to track their
+ // effects on other entries so that those effects can be reverted when the
+ // identity is removed.
+ ms.addDependentOnEntry(k, v, denyKeyCpy, changes)
+ }
+ } else if (k.Identity == newKey.Identity ||
+ identityIsSupersetOf(k.Identity, newKey.Identity, identities)) &&
+ (k.PortProtoIsBroader(newKey) || k.PortProtoIsEqual(newKey)) &&
+ !v.HasDependent(newKey) {
+ // If the iterated-deny-entry is a superset (or equal) of the new-entry and has a
+ // broader (or equal) port-protocol than the new-entry then the new
+ // entry should not be inserted.
+ bailed = true
+ return false
+ }
+
+ return true
+ })
+
+ if !bailed {
+ ms.authPreferredInsert(newKey, newEntry, features, changes)
+ }
+ }
+}
+
+// IsSuperSetOf checks if the receiver Key is a superset of the argument Key, and returns a
+// specificity score of the receiver key (higher score is more specific), if so. Being a superset
+// means that the receiver key would match all the traffic of the argument key without being the
+// same key. Hence, a L3-only key is not a superset of a L4-only key, as the L3-only key would match
+// the traffic for the given L3 only, while the L4-only key matches traffic on the given port for
+// all the L3's.
+// Returns 0 if the receiver key is not a superset of the argument key.
+//
+// Specificity score for all possible superset wildcard patterns. Datapath requires proto to be specified if port is specified.
+// x. L3/proto/port
+// 1. */*/*
+// 2. */proto/*
+// 3. */proto/port
+// 4. ID/*/*
+// 5. ID/proto/*
+// ( ID/proto/port can not be superset of anything )
+func (k Key) IsSuperSetOf(other Key) int {
+ if k.TrafficDirection != other.TrafficDirection {
+ return 0 // TrafficDirection must match for 'k' to be a superset of 'other'
+ }
+ if k.Identity == 0 {
+ if other.Identity == 0 {
+ if k.Nexthdr == 0 { // k.DestPort == 0 is implied
+ if other.Nexthdr != 0 {
+ return 1 // */*/* is a superset of */proto/x
+ } // else both are */*/*
+ } else if k.Nexthdr == other.Nexthdr {
+ if k.DestPort == 0 && other.DestPort != 0 {
+ return 2 // */proto/* is a superset of */proto/port
+ } // else more specific or different ports
+ } // else more specific or different protocol
+ } else {
+ // Wildcard L3 is a superset of a specific L3 only if wildcard L3 is also wildcard L4, or the L4's match between the keys
+ if k.Nexthdr == 0 { // k.DestPort == 0 is implied
+ return 1 // */*/* is a superset of ID/x/x
+ } else if k.Nexthdr == other.Nexthdr {
+ if k.DestPort == 0 {
+ return 2 // */proto/* is a superset of ID/proto/x
+ } else if k.DestPort == other.DestPort {
+ return 3 // */proto/port is a superset of ID/proto/port
+ } // else more specific or different ports
+ } // else more specific or different protocol
+ }
+ } else if k.Identity == other.Identity {
+ if k.Nexthdr == 0 {
+ if other.Nexthdr != 0 {
+ return 4 // ID/*/* is a superset of ID/proto/x
+ } // else both are ID/*/*
+ } else if k.Nexthdr == other.Nexthdr {
+ if k.DestPort == 0 && other.DestPort != 0 {
+ return 5 // ID/proto/* is a superset of ID/proto/port
+ } // else more specific or different ports
+ } // else more specific or different protocol
+ } // else more specific or different identity
+ return 0
+}
+
+// authPreferredInsert applies AuthType of a more generic entry to more specific entries, if not
+// explicitly specified.
+//
+// This function is expected to be called for a map insertion after deny
+// entry evaluation. If there is a map entry that is a superset of 'newKey'
+// which denies traffic matching 'newKey', then this function should not be called.
+func (ms *mapState) authPreferredInsert(newKey Key, newEntry MapStateEntry, features policyFeatures, changes ChangeState) {
+ if features.contains(authRules) {
+ if newEntry.hasAuthType == DefaultAuthType {
+ // New entry has a default auth type.
+ // Fill in the AuthType from more generic entries with an explicit auth type
+ maxSpecificity := 0
+ l3l4State := newMapState(nil)
+
+ ms.ForEachAllow(func(k Key, v MapStateEntry) bool {
+ // Only consider the same Traffic direction
+ if newKey.TrafficDirection != k.TrafficDirection {
+ return true
+ }
+
+ // Nothing to be done if entry has default AuthType
+ if v.hasAuthType == DefaultAuthType {
+ return true
+ }
+
+ // Find out if 'k' is an identity-port-proto superset of 'newKey'
+ if specificity := k.IsSuperSetOf(newKey); specificity > 0 {
+ if specificity > maxSpecificity {
+ // AuthType from the most specific superset is
+ // applied to 'newEntry'
+ newEntry.AuthType = v.AuthType
+ maxSpecificity = specificity
+ }
+ } else {
+ // Check if a new L3L4 entry must be created due to L3-only
+ // 'k' specifying an explicit AuthType and an L4-only 'newKey' not
+ // having an explicit AuthType. In this case AuthType should
+ // only override the AuthType for the L3 & L4 combination,
+ // not L4 in general.
+ //
+ // These need to be collected and only added if there is a
+ // superset key of newKey with an explicit auth type. In
+ // this case AuthType of the new L4-only entry was
+ // overridden by a more generic entry and 'maxSpecificity >
+ // 0' after the loop.
+ if k.Identity != 0 && k.Nexthdr == 0 && newKey.Identity == 0 && newKey.Nexthdr != 0 {
+ newKeyCpy := k
+ newKeyCpy.DestPort = newKey.DestPort
+ newKeyCpy.Nexthdr = newKey.Nexthdr
+ l3l4AuthEntry := NewMapStateEntry(k, v.DerivedFromRules, false, false, DefaultAuthType, v.AuthType)
+ l3l4AuthEntry.DerivedFromRules.MergeSorted(newEntry.DerivedFromRules)
+ l3l4State.allows[newKeyCpy] = l3l4AuthEntry
+ }
+ }
+ return true
+ })
+ // Add collected L3/L4 entries if the auth type of the new entry was not
+ // overridden by a more generic entry. If it was overridden, the new L3L4
+ // entries are not needed as the L4-only entry with an overridden AuthType
+ // will be matched before the L3-only entries in the datapath.
+ if maxSpecificity == 0 {
+ l3l4State.ForEach(func(k Key, v MapStateEntry) bool {
+ ms.addKeyWithChanges(k, v, changes)
+ // L3-only entries can be deleted incrementally so we need to track their
+ // effects on other entries so that those effects can be reverted when the
+ // identity is removed.
+ newEntry.AddDependent(k)
+ return true
+ })
+ }
+ } else {
+ // New entry has an explicit auth type.
+ // Check if the new entry is the most specific superset of any other entry
+ // with the default auth type, and propagate the auth type from the new
+ // entry to such entries.
+ explicitSubsetKeys := make(Keys)
+ defaultSubsetKeys := make(map[Key]int)
+
+ ms.ForEachAllow(func(k Key, v MapStateEntry) bool {
+ // Only consider the same Traffic direction
+ if newKey.TrafficDirection != k.TrafficDirection {
+ return true
+ }
+
+ // Find out if 'newKey' is a superset of 'k'
+ if specificity := newKey.IsSuperSetOf(k); specificity > 0 {
+ if v.hasAuthType == ExplicitAuthType {
+ // store for later comparison
+ explicitSubsetKeys[k] = struct{}{}
+ } else {
+ defaultSubsetKeys[k] = specificity
+ }
+ } else if v.hasAuthType == DefaultAuthType {
+ // Check if a new L3L4 entry must be created due to L3-only
+ // 'newKey' with an explicit AuthType and an L4-only 'k' not
+ // having an explicit AuthType. In this case AuthType should
+ // only override the AuthType for the L3 & L4 combination,
+ // not L4 in general.
+ if newKey.Identity != 0 && newKey.Nexthdr == 0 && k.Identity == 0 && k.Nexthdr != 0 {
+ newKeyCpy := newKey
+ newKeyCpy.DestPort = k.DestPort
+ newKeyCpy.Nexthdr = k.Nexthdr
+ l3l4AuthEntry := NewMapStateEntry(newKey, newEntry.DerivedFromRules, false, false, DefaultAuthType, newEntry.AuthType)
+ l3l4AuthEntry.DerivedFromRules.MergeSorted(v.DerivedFromRules)
+ ms.addKeyWithChanges(newKeyCpy, l3l4AuthEntry, changes)
+ // L3-only entries can be deleted incrementally so we need to track their
+ // effects on other entries so that those effects can be reverted when the
+ // identity is removed.
+ newEntry.AddDependent(newKeyCpy)
+ }
+ }
+
+ return true
+ })
+ // Find out if this newKey is the most specific superset for all the subset keys with default auth type
+ Next:
+ for k, specificity := range defaultSubsetKeys {
+ for l := range explicitSubsetKeys {
+ if s := l.IsSuperSetOf(k); s > specificity {
+ // k has a more specific superset key than the newKey, skip
+ continue Next
+ }
+ }
+ // newKey is the most specific superset with an explicit auth type,
+ // propagate auth type from newEntry to the entry of k
+ v, _ := ms.Get(k)
+ v.AuthType = newEntry.AuthType
+ ms.addKeyWithChanges(k, v, changes) // Update the map value
+ }
+ }
+ }
+ ms.addKeyWithChanges(newKey, newEntry, changes)
+}
+
+var visibilityDerivedFromLabels = labels.LabelArray{
+ labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelVisibilityAnnotation, labels.LabelSourceReserved),
+}
+
+var visibilityDerivedFrom = labels.LabelArrayList{visibilityDerivedFromLabels}
+
+// InsertIfNotExists only inserts `key=value` if `key` does not exist in keys already
+// returns 'true' if 'key=entry' was added to 'keys'
+func (ms *mapState) InsertIfNotExists(key Key, entry MapStateEntry) bool {
+ isDeny := entry.IsDeny
+ if ms != nil && (isDeny && ms.denies != nil || !isDeny && ms.allows != nil) {
+ m := ms.allows
+ if isDeny {
+ m = ms.denies
+ }
+ if _, exists := m[key]; !exists {
+ // new containers to keep this entry separate from the one that may remain in 'keys'
+ entry.DerivedFromRules = slices.Clone(entry.DerivedFromRules)
+ entry.owners = maps.Clone(entry.owners)
+ entry.dependents = maps.Clone(entry.dependents)
+
+ m[key] = entry
+ return true
+ }
+ }
+ return false
+}
+
+// AddVisibilityKeys adjusts and expands PolicyMapState keys
+// and values to redirect for visibility on the port of the visibility
+// annotation while still denying traffic on this port for identities
+// for which the traffic is denied.
+//
+// Datapath lookup order is, from highest to lowest precedence:
+// 1. L3/L4
+// 2. L4-only (wildcard L3)
+// 3. L3-only (wildcard L4)
+// 4. Allow-all
+//
+// This means that the L4-only allow visibility key can only be added if there is an
+// allow-all key, and all L3-only deny keys are expanded to L3/L4 keys. If no
+// L4-only key is added then also the L3-only allow keys need to be expanded to
+// L3/L4 keys for visibility redirection. In addition the existing L3/L4 and L4-only
+// allow keys need to be redirected to the proxy port, if not already redirected.
+//
+// The above can be accomplished by:
+//
+// 1. Change existing L4-only ALLOW key on matching port that does not already
+// redirect to redirect.
+// - e.g., 0:80=allow,0 -> 0:80=allow,
+// 2. If allow-all policy exists, add L4-only visibility redirect key if the L4-only
+// key does not already exist.
+// - e.g., 0:0=allow,0 -> add 0:80=allow, if 0:80 does not exist
+// - this allows all traffic on port 80, but see step 5 below.
+// 3. Change all L3/L4 ALLOW keys on matching port that do not already redirect to
+// redirect.
+ // - e.g., :80=allow,0 -> :80=allow,
+// 4. For each L3-only ALLOW key add the corresponding L3/L4 ALLOW redirect if no
+// L3/L4 key already exists and no L4-only key already exists and one is not added.
+// - e.g., :0=allow,0 -> add :80=allow, if :80
+// and 0:80 do not exist
+// 5. If a new L4-only key was added: For each L3-only DENY key add the
+// corresponding L3/L4 DENY key if no L3/L4 key already exists.
+// - e.g., :0=deny,0 -> add :80=deny,0 if :80 does not exist
+//
+// With the above we only change/expand existing allow keys to redirect, and
+// expand existing drop keys to also drop on the port of interest, if a new
+// L4-only key allowing the port is added.
+//
+// 'adds' and 'oldValues' are updated with the changes made. 'adds' contains both the added and
+// changed keys. 'oldValues' contains the old values for changed keys. This function does not
+// delete any keys.
+func (ms *mapState) AddVisibilityKeys(e PolicyOwner, redirectPort uint16, visMeta *VisibilityMetadata, changes ChangeState) {
+ direction := trafficdirection.Egress
+ if visMeta.Ingress {
+ direction = trafficdirection.Ingress
+ }
+
+ allowAllKey := Key{
+ TrafficDirection: direction.Uint8(),
+ }
+ key := Key{
+ DestPort: visMeta.Port,
+ Nexthdr: uint8(visMeta.Proto),
+ TrafficDirection: direction.Uint8(),
+ }
+
+ entry := NewMapStateEntry(nil, visibilityDerivedFrom, true, false, DefaultAuthType, AuthTypeDisabled)
+ entry.ProxyPort = redirectPort
+
+ _, haveAllowAllKey := ms.Get(allowAllKey)
+ l4Only, haveL4OnlyKey := ms.Get(key)
+ addL4OnlyKey := false
+ if haveL4OnlyKey && !l4Only.IsDeny && l4Only.ProxyPort == 0 {
+ // 1. Change existing L4-only ALLOW key on matching port that does not already
+ // redirect to redirect.
+ e.PolicyDebug(logrus.Fields{
+ logfields.BPFMapKey: key,
+ logfields.BPFMapValue: entry,
+ }, "AddVisibilityKeys: Changing L4-only ALLOW key for visibility redirect")
+ ms.addKeyWithChanges(key, entry, changes)
+ }
+ if haveAllowAllKey && !haveL4OnlyKey {
+ // 2. If allow-all policy exists, add L4-only visibility redirect key if the L4-only
+ // key does not already exist.
+ e.PolicyDebug(logrus.Fields{
+ logfields.BPFMapKey: key,
+ logfields.BPFMapValue: entry,
+ }, "AddVisibilityKeys: Adding L4-only ALLOW key for visibility redirect")
+ addL4OnlyKey = true
+ ms.addKeyWithChanges(key, entry, changes)
+ }
+ //
+ // Loop through all L3 keys in the traffic direction of the new key
+ //
+ ms.ForEach(func(k Key, v MapStateEntry) bool {
+ if k.TrafficDirection != key.TrafficDirection || k.Identity == 0 {
+ return true
+ }
+ if k.DestPort == key.DestPort && k.Nexthdr == key.Nexthdr {
+ //
+ // Same L4
+ //
+ if !v.IsDeny && v.ProxyPort == 0 {
+ // 3. Change all L3/L4 ALLOW keys on matching port that do not
+ // already redirect to redirect.
+ v.ProxyPort = redirectPort
+ v.DerivedFromRules = visibilityDerivedFrom
+ e.PolicyDebug(logrus.Fields{
+ logfields.BPFMapKey: k,
+ logfields.BPFMapValue: v,
+ }, "AddVisibilityKeys: Changing L3/L4 ALLOW key for visibility redirect")
+ ms.addKeyWithChanges(k, v, changes)
+ }
+ } else if k.DestPort == 0 && k.Nexthdr == 0 {
+ //
+ // Wildcarded L4, i.e., L3-only
+ //
+ k2 := k
+ k2.DestPort = key.DestPort
+ k2.Nexthdr = key.Nexthdr
+ if !v.IsDeny && !haveL4OnlyKey && !addL4OnlyKey {
+ // 4. For each L3-only ALLOW key add the corresponding L3/L4
+ // ALLOW redirect if no L3/L4 key already exists and no
+ // L4-only key already exists and one is not added.
+ if _, ok := ms.Get(k2); !ok {
+ d2 := labels.LabelArrayList{visibilityDerivedFromLabels}
+ d2.MergeSorted(v.DerivedFromRules)
+ v2 := NewMapStateEntry(k, d2, true, false, v.hasAuthType, v.AuthType)
+ v2.ProxyPort = redirectPort
+ e.PolicyDebug(logrus.Fields{
+ logfields.BPFMapKey: k2,
+ logfields.BPFMapValue: v2,
+ }, "AddVisibilityKeys: Extending L3-only ALLOW key to L3/L4 key for visibility redirect")
+ ms.addKeyWithChanges(k2, v2, changes)
+
+ // Mark the new entry as a dependent of 'v'
+ ms.addDependentOnEntry(k, v, k2, changes)
+ }
+ } else if addL4OnlyKey && v.IsDeny {
+ // 5. If a new L4-only key was added: For each L3-only DENY
+ // key add the corresponding L3/L4 DENY key if no L3/L4
+ // key already exists.
+ if _, ok := ms.Get(k2); !ok {
+ v2 := NewMapStateEntry(k, v.DerivedFromRules, false, true, DefaultAuthType, AuthTypeDisabled)
+ e.PolicyDebug(logrus.Fields{
+ logfields.BPFMapKey: k2,
+ logfields.BPFMapValue: v2,
+ }, "AddVisibilityKeys: Extending L3-only DENY key to L3/L4 key to deny a port with visibility annotation")
+ ms.addKeyWithChanges(k2, v2, changes)
+
+ // Mark the new entry as a dependent of 'v'
+ ms.addDependentOnEntry(k, v, k2, changes)
+ }
+ }
+ }
+
+ return true
+ })
+}
+
+// determineAllowLocalhostIngress determines whether communication should be allowed
+// from the localhost. It inserts the Key corresponding to the localhost in
+// the desiredPolicyKeys if the localhost is allowed to communicate with the
+// endpoint. Authentication for localhost traffic is not required.
+func (ms *mapState) determineAllowLocalhostIngress() {
+ if option.Config.AlwaysAllowLocalhost() {
+ derivedFrom := labels.LabelArrayList{
+ labels.LabelArray{
+ labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelAllowLocalHostIngress, labels.LabelSourceReserved),
+ },
+ }
+ es := NewMapStateEntry(nil, derivedFrom, false, false, ExplicitAuthType, AuthTypeDisabled) // Authentication never required for local host ingress
+ ms.denyPreferredInsert(localHostKey, es, nil, allFeatures)
+ if !option.Config.EnableRemoteNodeIdentity {
+ var isHostDenied bool
+ v, ok := ms.Get(localHostKey)
+ isHostDenied = ok && v.IsDeny
+ derivedFrom := labels.LabelArrayList{
+ labels.LabelArray{
+ labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelAllowRemoteHostIngress, labels.LabelSourceReserved),
+ },
+ }
+ es := NewMapStateEntry(nil, derivedFrom, false, isHostDenied, ExplicitAuthType, AuthTypeDisabled) // Authentication never required for remote node ingress
+ ms.denyPreferredInsert(localRemoteNodeKey, es, nil, allFeatures)
+ }
+ }
+}
+
+// allowAllIdentities translates all identities in selectorCache to their
+// corresponding Keys in the specified direction (ingress, egress) which allows
+// all at L3.
+// Note that this is used when policy is not enforced, so authentication is explicitly not required.
+func (ms *mapState) allowAllIdentities(ingress, egress bool) {
+ if ingress {
+ keyToAdd := Key{
+ Identity: 0,
+ DestPort: 0,
+ Nexthdr: 0,
+ TrafficDirection: trafficdirection.Ingress.Uint8(),
+ }
+ derivedFrom := labels.LabelArrayList{
+ labels.LabelArray{
+ labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelAllowAnyIngress, labels.LabelSourceReserved),
+ },
+ }
+ ms.allows[keyToAdd] = NewMapStateEntry(nil, derivedFrom, false, false, ExplicitAuthType, AuthTypeDisabled)
+ }
+ if egress {
+ keyToAdd := Key{
+ Identity: 0,
+ DestPort: 0,
+ Nexthdr: 0,
+ TrafficDirection: trafficdirection.Egress.Uint8(),
+ }
+ derivedFrom := labels.LabelArrayList{
+ labels.LabelArray{
+ labels.NewLabel(LabelKeyPolicyDerivedFrom, LabelAllowAnyEgress, labels.LabelSourceReserved),
+ },
+ }
+ ms.allows[keyToAdd] = NewMapStateEntry(nil, derivedFrom, false, false, ExplicitAuthType, AuthTypeDisabled)
+ }
+}
+
+func (ms *mapState) deniesL4(policyOwner PolicyOwner, l4 *L4Filter) bool {
+ port := uint16(l4.Port)
+ proto := uint8(l4.U8Proto)
+
+ // resolve named port
+ if port == 0 && l4.PortName != "" {
+ port = policyOwner.GetNamedPort(l4.Ingress, l4.PortName, proto)
+ if port == 0 {
+ return true
+ }
+ }
+
+ var dir uint8
+ if l4.Ingress {
+ dir = trafficdirection.Ingress.Uint8()
+ } else {
+ dir = trafficdirection.Egress.Uint8()
+ }
+ anyKey := Key{
+ Identity: 0,
+ DestPort: 0,
+ Nexthdr: 0,
+ TrafficDirection: dir,
+ }
+ // Are we explicitly denying all traffic?
+ v, ok := ms.Get(anyKey)
+ if ok && v.IsDeny {
+ return true
+ }
+
+ // Are we explicitly denying this L4-only traffic?
+ anyKey.DestPort = port
+ anyKey.Nexthdr = proto
+ v, ok = ms.Get(anyKey)
+ if ok && v.IsDeny {
+ return true
+ }
+
+ // The given L4 is not categorically denied.
+ // Traffic to/from a specific L3 on any of the selectors can still be denied.
+ return false
+}
+
+func (ms *mapState) GetIdentities(log *logrus.Logger) (ingIdentities, egIdentities []int64) {
+ return ms.getIdentities(log, false)
+}
+
+func (ms *mapState) GetDenyIdentities(log *logrus.Logger) (ingIdentities, egIdentities []int64) {
+ return ms.getIdentities(log, true)
+}
+
+// GetIdentities returns the ingress and egress identities stored in the
+// MapState.
+func (ms *mapState) getIdentities(log *logrus.Logger, denied bool) (ingIdentities, egIdentities []int64) {
+ ms.ForEach(func(policyMapKey Key, policyMapValue MapStateEntry) bool {
+ if denied != policyMapValue.IsDeny {
+ return true
+ }
+ if policyMapKey.DestPort != 0 {
+ // If the port is non-zero, then the Key no longer only applies
+ // at L3. AllowedIngressIdentities and AllowedEgressIdentities
+ // contain sets of which identities (i.e., label-based L3 only)
+ // are allowed, so anything which contains L4-related policy should
+ // not be added to these sets.
+ return true
+ }
+ switch trafficdirection.TrafficDirection(policyMapKey.TrafficDirection) {
+ case trafficdirection.Ingress:
+ ingIdentities = append(ingIdentities, int64(policyMapKey.Identity))
+ case trafficdirection.Egress:
+ egIdentities = append(egIdentities, int64(policyMapKey.Identity))
+ default:
+ td := trafficdirection.TrafficDirection(policyMapKey.TrafficDirection)
+ log.WithField(logfields.TrafficDirection, td).
+ Errorf("Unexpected traffic direction present in policy map state for endpoint")
+ }
+ return true
+ })
+ return ingIdentities, egIdentities
+}
+
+// MapChanges collects updates to the endpoint policy on the
+// granularity of individual mapstate key-value pairs for both adds
+// and deletes. 'mutex' must be held for any access.
+type MapChanges struct {
+ mutex lock.Mutex
+ changes []MapChange
+}
+
+type MapChange struct {
+ Add bool // false deletes
+ Key Key
+ Value MapStateEntry
+}
+
+// AccumulateMapChanges accumulates the given changes to the
+// MapChanges.
+//
+// The caller is responsible for making sure the same identity is not
+// present in both 'adds' and 'deletes'.
+func (mc *MapChanges) AccumulateMapChanges(cs CachedSelector, adds, deletes []identity.NumericIdentity,
+ port uint16, proto uint8, direction trafficdirection.TrafficDirection,
+ redirect, isDeny bool, hasAuth HasAuthType, authType AuthType, derivedFrom labels.LabelArrayList) {
+ key := Key{
+ // The actual identity is set in the loops below
+ Identity: 0,
+ // NOTE: Port is in host byte-order!
+ DestPort: port,
+ Nexthdr: proto,
+ TrafficDirection: direction.Uint8(),
+ }
+
+ value := NewMapStateEntry(cs, derivedFrom, redirect, isDeny, hasAuth, authType)
+
+ if option.Config.Debug {
+ authString := "default"
+ if hasAuth {
+ authString = authType.String()
+ }
+ log.WithFields(logrus.Fields{
+ logfields.EndpointSelector: cs,
+ logfields.AddedPolicyID: adds,
+ logfields.DeletedPolicyID: deletes,
+ logfields.Port: port,
+ logfields.Protocol: proto,
+ logfields.TrafficDirection: direction,
+ logfields.IsRedirect: redirect,
+ logfields.AuthType: authString,
+ }).Debug("AccumulateMapChanges")
+ }
+
+ mc.mutex.Lock()
+ for _, id := range adds {
+ key.Identity = id.Uint32()
+ mc.changes = append(mc.changes, MapChange{Add: true, Key: key, Value: value})
+ }
+ for _, id := range deletes {
+ key.Identity = id.Uint32()
+ mc.changes = append(mc.changes, MapChange{Add: false, Key: key, Value: value})
+ }
+ mc.mutex.Unlock()
+}
+
+// consumeMapChanges transfers the incremental changes from MapChanges to the caller,
+// while applying the changes to PolicyMapState.
+func (mc *MapChanges) consumeMapChanges(policyMapState MapState, features policyFeatures, identities Identities) (adds, deletes Keys) {
+ mc.mutex.Lock()
+ changes := ChangeState{
+ Adds: make(Keys, len(mc.changes)),
+ Deletes: make(Keys, len(mc.changes)),
+ }
+
+ for i := range mc.changes {
+ if mc.changes[i].Add {
+ // insert but do not allow non-redirect entries to overwrite a redirect entry,
+ // nor allow non-deny entries to overwrite deny entries.
+ // Collect the incremental changes to the overall state in 'mc.adds' and 'mc.deletes'.
+ policyMapState.denyPreferredInsertWithChanges(mc.changes[i].Key, mc.changes[i].Value, identities, features, changes)
+ } else {
+ // Delete the contribution of this cs to the key and collect incremental changes
+ for cs := range mc.changes[i].Value.owners { // get the sole selector
+ policyMapState.deleteKeyWithChanges(mc.changes[i].Key, cs, changes)
+ }
+ }
+ }
+ mc.changes = nil
+ mc.mutex.Unlock()
+ return changes.Adds, changes.Deletes
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/policy.go b/vendor/github.com/cilium/cilium/pkg/policy/policy.go
new file mode 100644
index 000000000..861530dd8
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/policy.go
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package policy
+
+import (
+ "io"
+ stdlog "log"
+ "strconv"
+ "strings"
+
+ "github.com/cilium/cilium/api/v1/models"
+ "github.com/cilium/cilium/pkg/labels"
+ "github.com/cilium/cilium/pkg/policy/api"
+)
+
+type Tracing int
+
+const (
+ TRACE_DISABLED Tracing = iota
+ TRACE_ENABLED
+ TRACE_VERBOSE
+)
+
+// TraceEnabled returns true if the SearchContext requests tracing.
+func (s *SearchContext) TraceEnabled() bool {
+ return s.Trace != TRACE_DISABLED
+}
+
+// PolicyTrace logs the given message into the SearchContext logger only if
+// TRACE_ENABLED or TRACE_VERBOSE is enabled in the receiver's SearchContext.
+func (s *SearchContext) PolicyTrace(format string, a ...interface{}) {
+ if s.TraceEnabled() {
+ log.Debugf(format, a...)
+ if s.Logging != nil {
+ format = "%-" + s.CallDepth() + "s" + format
+ a = append([]interface{}{""}, a...)
+ s.Logging.Printf(format, a...)
+ }
+ }
+}
+
+// PolicyTraceVerbose logs the given message into the SearchContext logger only
+// if TRACE_VERBOSE is enabled in the receiver's SearchContext.
+func (s *SearchContext) PolicyTraceVerbose(format string, a ...interface{}) {
+ switch s.Trace {
+ case TRACE_VERBOSE:
+ log.Debugf(format, a...)
+ if s.Logging != nil {
+ s.Logging.Printf(format, a...)
+ }
+ }
+}
+
+// SearchContext defines the context while evaluating policy
+type SearchContext struct {
+ Trace Tracing
+ Depth int
+ Logging *stdlog.Logger
+ From labels.LabelArray
+ To labels.LabelArray
+ DPorts []*models.Port
+ // rulesSelect specifies whether or not to check whether a rule which is
+ // being analyzed using this SearchContext matches either From or To.
+ // This is used to avoid using EndpointSelector.Matches() if possible,
+ // since it is costly in terms of performance.
+ rulesSelect bool
+}
+
+func (s *SearchContext) String() string {
+ from := make([]string, 0, len(s.From))
+ to := make([]string, 0, len(s.To))
+ dports := make([]string, 0, len(s.DPorts))
+ for _, fromLabel := range s.From {
+ from = append(from, fromLabel.String())
+ }
+ for _, toLabel := range s.To {
+ to = append(to, toLabel.String())
+ }
+ // We should avoid to use `fmt.Sprintf()` since
+ // it is well-known for not being opimal in terms of
+ // CPU and memory allocations.
+ // See https://github.com/cilium/cilium/issues/19571
+ for _, dport := range s.DPorts {
+ dportStr := dport.Name
+ if dportStr == "" {
+ dportStr = strconv.FormatUint(uint64(dport.Port), 10)
+ }
+ dports = append(dports, dportStr+"/"+dport.Protocol)
+ }
+ fromStr := strings.Join(from, ", ")
+ toStr := strings.Join(to, ", ")
+ if len(dports) != 0 {
+ dportStr := strings.Join(dports, ", ")
+ return "From: [" + fromStr + "] => To: [" + toStr + "] Ports: [" + dportStr + "]"
+ }
+ return "From: [" + fromStr + "] => To: [" + toStr + "]"
+}
+
+func (s *SearchContext) CallDepth() string {
+ return strconv.Itoa(s.Depth * 2)
+}
+
+// WithLogger returns a shallow copy of the received SearchContext with the
+// logging set to write to 'log'.
+func (s *SearchContext) WithLogger(log io.Writer) *SearchContext {
+ result := *s
+ result.Logging = stdlog.New(log, "", 0)
+ if result.Trace == TRACE_DISABLED {
+ result.Trace = TRACE_ENABLED
+ }
+ return &result
+}
+
+// Translator is an interface for altering policy rules
+type Translator interface {
+ Translate(*api.Rule, *TranslationResult) error
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/proxyid.go b/vendor/github.com/cilium/cilium/pkg/policy/proxyid.go
new file mode 100644
index 000000000..96029950a
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/proxyid.go
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package policy
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/cilium/cilium/pkg/policy/trafficdirection"
+ "github.com/cilium/cilium/pkg/u8proto"
+)
+
+// ProxyID returns a unique string to identify a proxy mapping.
+func ProxyID(endpointID uint16, ingress bool, protocol string, port uint16) string {
+ direction := "egress"
+ if ingress {
+ direction = "ingress"
+ }
+ return strconv.FormatUint(uint64(endpointID), 10) + ":" + direction + ":" + protocol + ":" + strconv.FormatUint(uint64(port), 10)
+}
+
+// ProxyIDFromKey returns a unique string to identify a proxy mapping.
+func ProxyIDFromKey(endpointID uint16, key Key) string {
+ return ProxyID(endpointID, key.TrafficDirection == trafficdirection.Ingress.Uint8(), u8proto.U8proto(key.Nexthdr).String(), key.DestPort)
+}
+
+// ParseProxyID parses a proxy ID returned by ProxyID and returns its components.
+func ParseProxyID(proxyID string) (endpointID uint16, ingress bool, protocol string, port uint16, err error) {
+ comps := strings.Split(proxyID, ":")
+ if len(comps) != 4 {
+ err = fmt.Errorf("invalid proxy ID structure: %s", proxyID)
+ return
+ }
+ epID, err := strconv.ParseUint(comps[0], 10, 16)
+ if err != nil {
+ return
+ }
+ endpointID = uint16(epID)
+ ingress = comps[1] == "ingress"
+ protocol = comps[2]
+ l4port, err := strconv.ParseUint(comps[3], 10, 16)
+ if err != nil {
+ return
+ }
+ port = uint16(l4port)
+ return
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/repository.go b/vendor/github.com/cilium/cilium/pkg/policy/repository.go
new file mode 100644
index 000000000..52cde0e92
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/repository.go
@@ -0,0 +1,815 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package policy
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/netip"
+ "sync"
+ "sync/atomic"
+
+ cilium "github.com/cilium/proxy/go/cilium/api"
+
+ "github.com/cilium/cilium/api/v1/models"
+ "github.com/cilium/cilium/pkg/crypto/certificatemanager"
+ "github.com/cilium/cilium/pkg/eventqueue"
+ "github.com/cilium/cilium/pkg/identity"
+ "github.com/cilium/cilium/pkg/identity/cache"
+ k8sConst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
+ "github.com/cilium/cilium/pkg/labels"
+ "github.com/cilium/cilium/pkg/lock"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+ "github.com/cilium/cilium/pkg/metrics"
+ "github.com/cilium/cilium/pkg/option"
+ "github.com/cilium/cilium/pkg/policy/api"
+)
+
+// PolicyContext is an interface policy resolution functions use to access the Repository.
+// This way testing code can run without mocking a full Repository.
+type PolicyContext interface {
+ // return the namespace in which the policy rule is being resolved
+ GetNamespace() string
+
+ // return the SelectorCache
+ GetSelectorCache() *SelectorCache
+
+ // GetTLSContext resolves the given 'api.TLSContext' into CA
+ // certs and the public and private keys, using secrets from
+ // k8s or from the local file system.
+ GetTLSContext(tls *api.TLSContext) (ca, public, private string, err error)
+
+ // GetEnvoyHTTPRules translates the given 'api.L7Rules' into
+ // the protobuf representation the Envoy can consume. The bool
+ // return parameter tells whether the rule enforcement can
+ // be short-circuited upon the first allowing rule. This is
+ // false if any of the rules has side-effects, requiring all
+ // such rules being evaluated.
+ GetEnvoyHTTPRules(l7Rules *api.L7Rules) (*cilium.HttpNetworkPolicyRules, bool)
+
+ // IsDeny returns true if the policy computation should be done for the
+ // policy deny case. This function returns different values depending on the
+ // code path as it can be changed during the policy calculation.
+ IsDeny() bool
+
+ // SetDeny sets the Deny field of the PolicyContext and returns the old
+ // value stored.
+ SetDeny(newValue bool) (oldValue bool)
+}
+
+type policyContext struct {
+ repo *Repository
+ ns string
+ // isDeny this field is set to true if the given policy computation should
+ // be done for the policy deny.
+ isDeny bool
+}
+
+// GetNamespace() returns the namespace for the policy rule being resolved
+func (p *policyContext) GetNamespace() string {
+ return p.ns
+}
+
+// GetSelectorCache() returns the selector cache used by the Repository
+func (p *policyContext) GetSelectorCache() *SelectorCache {
+ return p.repo.GetSelectorCache()
+}
+
+// GetTLSContext() returns data for TLS Context via a CertificateManager
+func (p *policyContext) GetTLSContext(tls *api.TLSContext) (ca, public, private string, err error) {
+ if p.repo.certManager == nil {
+ return "", "", "", fmt.Errorf("No Certificate Manager set on Policy Repository")
+ }
+ return p.repo.certManager.GetTLSContext(context.TODO(), tls, p.ns)
+}
+
+func (p *policyContext) GetEnvoyHTTPRules(l7Rules *api.L7Rules) (*cilium.HttpNetworkPolicyRules, bool) {
+ return p.repo.GetEnvoyHTTPRules(l7Rules, p.ns)
+}
+
+// IsDeny returns true if the policy computation should be done for the
+// policy deny case. This function returns different values depending on the
+// code path as it can be changed during the policy calculation.
+func (p *policyContext) IsDeny() bool {
+ return p.isDeny
+}
+
+// SetDeny sets the Deny field of the PolicyContext and returns the old
+// value stored.
+func (p *policyContext) SetDeny(deny bool) bool {
+ oldDeny := p.isDeny
+ p.isDeny = deny
+ return oldDeny
+}
+
+// Repository is a list of policy rules which in combination form the security
+// policy. A policy repository can be
+type Repository struct {
+ // Mutex protects the whole policy tree
+ Mutex lock.RWMutex
+ rules ruleSlice
+
+ // rulesIndexByK8sUID indexes the rules by k8s UID.
+ rulesIndexByK8sUID map[string]*rule
+
+ // revision is the revision of the policy repository. It will be
+ // incremented whenever the policy repository is changed.
+ // Always positive (>0).
+ revision atomic.Uint64
+
+ // RepositoryChangeQueue is a queue which serializes changes to the policy
+ // repository.
+ RepositoryChangeQueue *eventqueue.EventQueue
+
+ // RuleReactionQueue is a queue which serializes the resultant events that
+ // need to occur after updating the state of the policy repository. This
+ // can include queueing endpoint regenerations, policy revision increments
+ // for endpoints, etc.
+ RuleReactionQueue *eventqueue.EventQueue
+
+ // SelectorCache tracks the selectors used in the policies
+ // resolved from the repository.
+ selectorCache *SelectorCache
+
+ // PolicyCache tracks the selector policies created from this repo
+ policyCache *PolicyCache
+
+ certManager certificatemanager.CertificateManager
+ secretManager certificatemanager.SecretManager
+
+ getEnvoyHTTPRules func(certificatemanager.SecretManager, *api.L7Rules, string) (*cilium.HttpNetworkPolicyRules, bool)
+}
+
+// GetSelectorCache() returns the selector cache used by the Repository
+func (p *Repository) GetSelectorCache() *SelectorCache {
+ return p.selectorCache
+}
+
+// GetAuthTypes returns the AuthTypes required by the policy between the localID and remoteID
+func (p *Repository) GetAuthTypes(localID, remoteID identity.NumericIdentity) AuthTypes {
+ return p.policyCache.GetAuthTypes(localID, remoteID)
+}
+
+func (p *Repository) SetEnvoyRulesFunc(f func(certificatemanager.SecretManager, *api.L7Rules, string) (*cilium.HttpNetworkPolicyRules, bool)) {
+ p.getEnvoyHTTPRules = f
+}
+
+func (p *Repository) GetEnvoyHTTPRules(l7Rules *api.L7Rules, ns string) (*cilium.HttpNetworkPolicyRules, bool) {
+ if p.getEnvoyHTTPRules == nil {
+ return nil, true
+ }
+ return p.getEnvoyHTTPRules(p.secretManager, l7Rules, ns)
+}
+
+// GetPolicyCache() returns the policy cache used by the Repository
+func (p *Repository) GetPolicyCache() *PolicyCache {
+ return p.policyCache
+}
+
+// NewPolicyRepository creates a new policy repository.
+func NewPolicyRepository(
+ idAllocator cache.IdentityAllocator,
+ idCache cache.IdentityCache,
+ certManager certificatemanager.CertificateManager,
+ secretManager certificatemanager.SecretManager,
+) *Repository {
+ repo := NewStoppedPolicyRepository(idAllocator, idCache, certManager, secretManager)
+ repo.Start()
+ return repo
+}
+
+// NewStoppedPolicyRepository creates a new policy repository without starting
+// queues.
+//
+// Queues must be allocated via [Repository.Start]. The function serves to
+// satisfy hive invariants.
+func NewStoppedPolicyRepository(
+ idAllocator cache.IdentityAllocator,
+ idCache cache.IdentityCache,
+ certManager certificatemanager.CertificateManager,
+ secretManager certificatemanager.SecretManager,
+) *Repository {
+ selectorCache := NewSelectorCache(idAllocator, idCache)
+ repo := &Repository{
+ rulesIndexByK8sUID: map[string]*rule{},
+ selectorCache: selectorCache,
+ certManager: certManager,
+ secretManager: secretManager,
+ }
+ repo.revision.Store(1)
+ repo.policyCache = NewPolicyCache(repo, true)
+ return repo
+}
+
+// traceState is an internal structure used to collect information
+// while determining policy decision
+type traceState struct {
+ // selectedRules is the number of rules with matching EndpointSelector
+ selectedRules int
+
+ // matchedRules is the number of rules that have allowed traffic
+ matchedRules int
+
+ // matchedDenyRules is the number of rules that have denied traffic
+ matchedDenyRules int
+
+ // constrainedRules counts how many "FromRequires" constraints are
+ // unsatisfied
+ constrainedRules int
+
+ // ruleID is the rule ID currently being evaluated
+ ruleID int
+}
+
+func (state *traceState) trace(rules int, ctx *SearchContext) {
+ ctx.PolicyTrace("%d/%d rules selected\n", state.selectedRules, rules)
+ if state.constrainedRules > 0 {
+ ctx.PolicyTrace("Found unsatisfied FromRequires constraint\n")
+ } else {
+ if state.matchedRules > 0 {
+ ctx.PolicyTrace("Found allow rule\n")
+ } else {
+ ctx.PolicyTrace("Found no allow rule\n")
+ }
+ if state.matchedDenyRules > 0 {
+ ctx.PolicyTrace("Found deny rule\n")
+ } else {
+ ctx.PolicyTrace("Found no deny rule\n")
+ }
+ }
+}
+
+// Start allocates and starts various queues used by the Repository.
+//
+// Must only be called if using [NewStoppedPolicyRepository]
+func (p *Repository) Start() {
+ p.RepositoryChangeQueue = eventqueue.NewEventQueueBuffered("repository-change-queue", option.Config.PolicyQueueSize)
+ p.RuleReactionQueue = eventqueue.NewEventQueueBuffered("repository-reaction-queue", option.Config.PolicyQueueSize)
+ p.RepositoryChangeQueue.Run()
+ p.RuleReactionQueue.Run()
+}
+
+// ResolveL4IngressPolicy resolves the L4 ingress policy for a set of endpoints
+// by searching the policy repository for `PortRule` rules that are attached to
+// a `Rule` where the EndpointSelector matches `ctx.To`. `ctx.From` takes no effect and
+// is ignored in the search. If multiple `PortRule` rules are found, all rules
+// are merged together. If rules contains overlapping port definitions, the first
+// rule found in the repository takes precedence.
+//
+// TODO: Coalesce l7 rules?
+//
+// Caller must release resources by calling Detach() on the returned map!
+//
+// NOTE: This is only called from unit tests, but from multiple packages.
+func (p *Repository) ResolveL4IngressPolicy(ctx *SearchContext) (L4PolicyMap, error) {
+ policyCtx := policyContext{
+ repo: p,
+ ns: ctx.To.Get(labels.LabelSourceK8sKeyPrefix + k8sConst.PodNamespaceLabel),
+ }
+ result, err := p.rules.resolveL4IngressPolicy(&policyCtx, ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ return result, nil
+}
+
+// ResolveL4EgressPolicy resolves the L4 egress policy for a set of endpoints
+// by searching the policy repository for `PortRule` rules that are attached to
+// a `Rule` where the EndpointSelector matches `ctx.From`. `ctx.To` takes no effect and
+// is ignored in the search. If multiple `PortRule` rules are found, all rules
+// are merged together. If rules contains overlapping port definitions, the first
+// rule found in the repository takes precedence.
+//
+// Caller must release resources by calling Detach() on the returned map!
+//
+// NOTE: This is only called from unit tests, but from multiple packages.
+func (p *Repository) ResolveL4EgressPolicy(ctx *SearchContext) (L4PolicyMap, error) {
+ policyCtx := policyContext{
+ repo: p,
+ ns: ctx.From.Get(labels.LabelSourceK8sKeyPrefix + k8sConst.PodNamespaceLabel),
+ }
+ result, err := p.rules.resolveL4EgressPolicy(&policyCtx, ctx)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return result, nil
+}
+
+// AllowsIngressRLocked evaluates the policy repository for the provided search
+// context and returns the verdict for ingress. If no matching policy allows for
+// the connection, the request will be denied. The policy repository mutex must
+// be held.
+//
+// NOTE: This is only called from unit tests, but from multiple packages.
+func (p *Repository) AllowsIngressRLocked(ctx *SearchContext) api.Decision {
+ // Lack of DPorts in the SearchContext means L3-only search
+ if len(ctx.DPorts) == 0 {
+ newCtx := *ctx
+ newCtx.DPorts = []*models.Port{{
+ Port: 0,
+ Protocol: models.PortProtocolANY,
+ }}
+ ctx = &newCtx
+ }
+
+ ctx.PolicyTrace("Tracing %s", ctx.String())
+ ingressPolicy, err := p.ResolveL4IngressPolicy(ctx)
+ if err != nil {
+ log.WithError(err).Warn("Evaluation error while resolving L4 ingress policy")
+ }
+
+ verdict := api.Denied
+ if err == nil && len(ingressPolicy) > 0 {
+ verdict = ingressPolicy.IngressCoversContext(ctx)
+ }
+
+ ctx.PolicyTrace("Ingress verdict: %s", verdict.String())
+ ingressPolicy.Detach(p.GetSelectorCache())
+
+ return verdict
+}
+
+// AllowsEgressRLocked evaluates the policy repository for the provided search
+// context and returns the verdict. If no matching policy allows for the
+// connection, the request will be denied. The policy repository mutex must be
+// held.
+//
+// NOTE: This is only called from unit tests, but from multiple packages.
+func (p *Repository) AllowsEgressRLocked(ctx *SearchContext) api.Decision {
+ // Lack of DPorts in the SearchContext means L3-only search
+ if len(ctx.DPorts) == 0 {
+ newCtx := *ctx
+ newCtx.DPorts = []*models.Port{{
+ Port: 0,
+ Protocol: models.PortProtocolANY,
+ }}
+ ctx = &newCtx
+ }
+
+ ctx.PolicyTrace("Tracing %s\n", ctx.String())
+ egressPolicy, err := p.ResolveL4EgressPolicy(ctx)
+ if err != nil {
+ log.WithError(err).Warn("Evaluation error while resolving L4 egress policy")
+ }
+ verdict := api.Denied
+ if err == nil && len(egressPolicy) > 0 {
+ verdict = egressPolicy.EgressCoversContext(ctx)
+ }
+
+ ctx.PolicyTrace("Egress verdict: %s", verdict.String())
+ egressPolicy.Detach(p.GetSelectorCache())
+ return verdict
+}
+
+// SearchRLocked searches the policy repository for rules which match the
+// specified labels and will return an array of all rules which matched.
+func (p *Repository) SearchRLocked(lbls labels.LabelArray) api.Rules {
+ result := api.Rules{}
+
+ if uid := lbls.Get(labels.LabelSourceK8sKeyPrefix + k8sConst.PolicyLabelUID); uid != "" {
+ r, ok := p.rulesIndexByK8sUID[uid]
+ if ok {
+ result = append(result, &r.Rule)
+ }
+ return result
+ }
+ for _, r := range p.rules {
+ if r.Labels.Contains(lbls) {
+ result = append(result, &r.Rule)
+ }
+ }
+
+ return result
+}
+
+// Add inserts a rule into the policy repository
+// This is just a helper function for unit testing.
+// TODO: this should be in a test_helpers.go file or something similar
+// so we can clearly delineate what helpers are for testing.
+// NOTE: This is only called from unit tests, but from multiple packages.
+func (p *Repository) Add(r api.Rule) (uint64, map[uint16]struct{}, error) {
+ p.Mutex.Lock()
+ defer p.Mutex.Unlock()
+
+ if err := r.Sanitize(); err != nil {
+ return p.GetRevision(), nil, err
+ }
+
+ newList := make([]*api.Rule, 1)
+ newList[0] = &r
+ _, rev := p.AddListLocked(newList)
+ return rev, map[uint16]struct{}{}, nil
+}
+
+// AddListLocked inserts a rule into the policy repository with the repository already locked
+// Expects that the entire rule list has already been sanitized.
+func (p *Repository) AddListLocked(rules api.Rules) (ruleSlice, uint64) {
+
+ newList := make(ruleSlice, len(rules))
+ for i := range rules {
+ newRule := &rule{
+ Rule: *rules[i],
+ metadata: newRuleMetadata(),
+ }
+ newList[i] = newRule
+ if uid := rules[i].Labels.Get(labels.LabelSourceK8sKeyPrefix + k8sConst.PolicyLabelUID); uid != "" {
+ p.rulesIndexByK8sUID[uid] = newRule
+ }
+ }
+
+ p.rules = append(p.rules, newList...)
+ p.BumpRevision()
+ metrics.Policy.Add(float64(len(newList)))
+ return newList, p.GetRevision()
+}
+
+// removeIdentityFromRuleCaches removes the identity from the selector cache
+// in each rule in the repository.
+//
+// Returns a sync.WaitGroup that blocks until the policy operation is complete.
+// The repository read lock must be held until the waitgroup is complete.
+func (p *Repository) removeIdentityFromRuleCaches(identity *identity.Identity) *sync.WaitGroup {
+ var wg sync.WaitGroup
+ wg.Add(len(p.rules))
+ for _, r := range p.rules {
+ go func(rr *rule, wgg *sync.WaitGroup) {
+ rr.metadata.delete(identity)
+ wgg.Done()
+ }(r, &wg)
+ }
+ return &wg
+}
+
+// LocalEndpointIdentityAdded handles local identity add events.
+func (p *Repository) LocalEndpointIdentityAdded(*identity.Identity) {
+ // no-op for now.
+}
+
+// LocalEndpointIdentityRemoved handles local identity removal events to
+// remove references from rules in the repository to the specified identity.
+func (p *Repository) LocalEndpointIdentityRemoved(identity *identity.Identity) {
+ go func() {
+ scopedLog := log.WithField(logfields.Identity, identity)
+ scopedLog.Debug("Removing identity references from policy cache")
+ p.Mutex.RLock()
+ wg := p.removeIdentityFromRuleCaches(identity)
+ wg.Wait()
+ p.Mutex.RUnlock()
+ scopedLog.Debug("Finished cleaning policy cache")
+ }()
+}
+
+// AddList inserts a rule into the policy repository. It is used for
+// unit-testing purposes only.
+func (p *Repository) AddList(rules api.Rules) (ruleSlice, uint64) {
+ p.Mutex.Lock()
+ defer p.Mutex.Unlock()
+ return p.AddListLocked(rules)
+}
+
+// Iterate iterates the policy repository, calling f for each rule. It is safe
+// to execute Iterate concurrently.
+func (p *Repository) Iterate(f func(rule *api.Rule)) {
+ p.Mutex.RWMutex.Lock()
+ defer p.Mutex.RWMutex.Unlock()
+ for _, r := range p.rules {
+ f(&r.Rule)
+ }
+}
+
+// UpdateRulesEndpointsCaches updates the caches within each rule in r that
+// specify whether the rule selects the endpoints in eps. If any rule matches
+// the endpoints, it is added to the provided IDSet, and removed from the
+// provided EndpointSet. The provided WaitGroup is signaled for a given endpoint
+// when it is finished being processed.
+func (r ruleSlice) UpdateRulesEndpointsCaches(endpointsToBumpRevision, endpointsToRegenerate *EndpointSet, policySelectionWG *sync.WaitGroup) {
+ endpointsToBumpRevision.ForEachGo(policySelectionWG, func(epp Endpoint) {
+ endpointSelected, err := r.updateEndpointsCaches(epp)
+ if endpointSelected {
+ endpointsToRegenerate.Insert(epp)
+ }
+ // If we could not evaluate the rules against the current endpoint, or
+ // the endpoint is selected by the rules, remove it from the set of
+ // endpoints to bump the revision. If the error is non-nil, the
+ // endpoint is no longer in either set (endpointsToBumpRevision or
+ // endpointsToRegenerate, as we could not determine what to do for the
+ // endpoint). This is usually the case when the endpoint is no longer
+ // alive (i.e., it has been marked to be deleted).
+ if endpointSelected || err != nil {
+ if err != nil {
+ log.WithError(err).Debug("could not determine whether endpoint was selected by rule")
+ }
+ endpointsToBumpRevision.Delete(epp)
+ }
+ })
+}
+
+// DeleteByLabelsLocked deletes all rules in the policy repository which
+// contain the specified labels. Returns the revision of the policy repository
+// after deleting the rules, as well as how many rules were deleted.
+func (p *Repository) DeleteByLabelsLocked(lbls labels.LabelArray) (ruleSlice, uint64, int) {
+
+ deleted := 0
+ new := p.rules[:0]
+ deletedRules := ruleSlice{}
+
+ for _, r := range p.rules {
+ if !r.Labels.Contains(lbls) {
+ new = append(new, r)
+ } else {
+ deletedRules = append(deletedRules, r)
+ deleted++
+ }
+ }
+
+ if deleted > 0 {
+ p.BumpRevision()
+ p.rules = new
+ if uid := lbls.Get(labels.LabelSourceK8sKeyPrefix + k8sConst.PolicyLabelUID); uid != "" {
+ delete(p.rulesIndexByK8sUID, uid)
+ }
+ metrics.Policy.Sub(float64(deleted))
+ }
+
+ return deletedRules, p.GetRevision(), deleted
+}
+
+// DeleteByLabels deletes all rules in the policy repository which contain the
+// specified labels
+func (p *Repository) DeleteByLabels(lbls labels.LabelArray) (uint64, int) {
+ p.Mutex.Lock()
+ defer p.Mutex.Unlock()
+ _, rev, numDeleted := p.DeleteByLabelsLocked(lbls)
+ return rev, numDeleted
+}
+
+// JSONMarshalRules returns a slice of policy rules as string in JSON
+// representation
+func JSONMarshalRules(rules api.Rules) string {
+ b, err := json.MarshalIndent(rules, "", " ")
+ if err != nil {
+ return err.Error()
+ }
+ return string(b)
+}
+
+// GetJSON returns all rules of the policy repository as string in JSON
+// representation
+func (p *Repository) GetJSON() string {
+ p.Mutex.RLock()
+ defer p.Mutex.RUnlock()
+
+ result := api.Rules{}
+ for _, r := range p.rules {
+ result = append(result, &r.Rule)
+ }
+
+ return JSONMarshalRules(result)
+}
+
+// GetRulesMatching returns whether any of the rules in a repository contain a
+// rule with labels matching the labels in the provided LabelArray.
+//
+// Must be called with p.Mutex held
+func (p *Repository) GetRulesMatching(lbls labels.LabelArray) (ingressMatch bool, egressMatch bool) {
+ ingressMatch = false
+ egressMatch = false
+ for _, r := range p.rules {
+ rulesMatch := r.getSelector().Matches(lbls)
+ if rulesMatch {
+ if len(r.Ingress) > 0 {
+ ingressMatch = true
+ }
+ if len(r.IngressDeny) > 0 {
+ ingressMatch = true
+ }
+ if len(r.Egress) > 0 {
+ egressMatch = true
+ }
+ if len(r.EgressDeny) > 0 {
+ egressMatch = true
+ }
+ }
+
+ if ingressMatch && egressMatch {
+ return
+ }
+ }
+ return
+}
+
+// getMatchingRules returns whether any of the rules in a repository contain a
+// rule with labels matching the given security identity, as well as
+// a slice of all rules which match.
+//
+// Must be called with p.Mutex held
+func (p *Repository) getMatchingRules(securityIdentity *identity.Identity) (
+ ingressMatch, egressMatch bool,
+ matchingRules ruleSlice) {
+
+ matchingRules = []*rule{}
+ for _, r := range p.rules {
+ isNode := securityIdentity.ID == identity.ReservedIdentityHost
+ selectsNode := r.NodeSelector.LabelSelector != nil
+ if selectsNode != isNode {
+ continue
+ }
+ if ruleMatches := r.matches(securityIdentity); ruleMatches {
+ // Don't need to update whether ingressMatch is true if it already
+ // has been determined to be true - allows us to not have to check
+ // length of slice.
+ if !ingressMatch {
+ ingressMatch = len(r.Ingress) > 0 || len(r.IngressDeny) > 0
+ }
+ if !egressMatch {
+ egressMatch = len(r.Egress) > 0 || len(r.EgressDeny) > 0
+ }
+ matchingRules = append(matchingRules, r)
+ }
+ }
+ return
+}
+
+// NumRules returns the amount of rules in the policy repository.
+//
+// Must be called with p.Mutex held
+func (p *Repository) NumRules() int {
+ return len(p.rules)
+}
+
+// GetRevision returns the revision of the policy repository
+func (p *Repository) GetRevision() uint64 {
+ return p.revision.Load()
+}
+
+// Empty returns 'true' if repository has no rules, 'false' otherwise.
+//
+// Must be called without p.Mutex held
+func (p *Repository) Empty() bool {
+ p.Mutex.Lock()
+ defer p.Mutex.Unlock()
+ return p.NumRules() == 0
+}
+
+// TranslationResult contains the results of the rule translation
+type TranslationResult struct {
+ // NumToServicesRules is the number of ToServices rules processed while
+ // translating the rules
+ NumToServicesRules int
+
+ // BackendPrefixes contains all egress CIDRs that are to be added
+ // for the translation.
+ PrefixesToAdd []netip.Prefix
+
+ // BackendPrefixes contains all egress CIDRs that are to be removed
+ // for the translation.
+ PrefixesToRelease []netip.Prefix
+}
+
+// TranslateRules traverses rules and applies provided translator to rules
+//
+// Note: Only used by the k8s watcher.
+func (p *Repository) TranslateRules(translator Translator) (*TranslationResult, error) {
+ p.Mutex.Lock()
+ defer p.Mutex.Unlock()
+
+ result := &TranslationResult{}
+
+ for ruleIndex := range p.rules {
+ if err := translator.Translate(&p.rules[ruleIndex].Rule, result); err != nil {
+ return nil, err
+ }
+ }
+ return result, nil
+}
+
+// BumpRevision allows forcing policy regeneration
+func (p *Repository) BumpRevision() {
+ metrics.PolicyRevision.Inc()
+ p.revision.Add(1)
+}
+
+// GetRulesList returns the current policy
+func (p *Repository) GetRulesList() *models.Policy {
+ p.Mutex.RLock()
+ defer p.Mutex.RUnlock()
+
+ lbls := labels.ParseSelectLabelArrayFromArray([]string{})
+ ruleList := p.SearchRLocked(lbls)
+
+ return &models.Policy{
+ Revision: int64(p.GetRevision()),
+ Policy: JSONMarshalRules(ruleList),
+ }
+}
+
+// resolvePolicyLocked returns the selectorPolicy for the provided
+// identity from the set of rules in the repository. If the policy
+// cannot be generated due to conflicts at L4 or L7, returns an error.
+//
+// Must be performed while holding the Repository lock.
+func (p *Repository) resolvePolicyLocked(securityIdentity *identity.Identity) (*selectorPolicy, error) {
+ // First obtain whether policy applies in both traffic directions, as well
+ // as list of rules which actually select this endpoint. This allows us
+ // to not have to iterate through the entire rule list multiple times and
+ // perform the matching decision again when computing policy for each
+ // protocol layer, which is quite costly in terms of performance.
+ ingressEnabled, egressEnabled,
+ matchingRules :=
+ p.computePolicyEnforcementAndRules(securityIdentity)
+
+ calculatedPolicy := &selectorPolicy{
+ Revision: p.GetRevision(),
+ SelectorCache: p.GetSelectorCache(),
+ L4Policy: NewL4Policy(p.GetRevision()),
+ IngressPolicyEnabled: ingressEnabled,
+ EgressPolicyEnabled: egressEnabled,
+ }
+
+ lbls := securityIdentity.LabelArray
+ ingressCtx := SearchContext{
+ To: lbls,
+ rulesSelect: true,
+ }
+
+ egressCtx := SearchContext{
+ From: lbls,
+ rulesSelect: true,
+ }
+
+ if option.Config.TracingEnabled() {
+ ingressCtx.Trace = TRACE_ENABLED
+ egressCtx.Trace = TRACE_ENABLED
+ }
+
+ policyCtx := policyContext{
+ repo: p,
+ ns: lbls.Get(labels.LabelSourceK8sKeyPrefix + k8sConst.PodNamespaceLabel),
+ }
+
+ if ingressEnabled {
+ newL4IngressPolicy, err := matchingRules.resolveL4IngressPolicy(&policyCtx, &ingressCtx)
+ if err != nil {
+ return nil, err
+ }
+ calculatedPolicy.L4Policy.Ingress.PortRules = newL4IngressPolicy
+ }
+
+ if egressEnabled {
+ newL4EgressPolicy, err := matchingRules.resolveL4EgressPolicy(&policyCtx, &egressCtx)
+ if err != nil {
+ return nil, err
+ }
+ calculatedPolicy.L4Policy.Egress.PortRules = newL4EgressPolicy
+ }
+
+ // Make the calculated policy ready for incremental updates
+ calculatedPolicy.Attach(&policyCtx)
+
+ return calculatedPolicy, nil
+}
+
+// computePolicyEnforcementAndRules returns whether policy applies at ingress or egress
+// for the given security identity, as well as a list of any rules which select
+// the set of labels of the given security identity.
+//
+// Must be called with repo mutex held for reading.
+func (p *Repository) computePolicyEnforcementAndRules(securityIdentity *identity.Identity) (
+ ingress, egress bool,
+ matchingRules ruleSlice,
+) {
+ lbls := securityIdentity.LabelArray
+
+ // Check if policy enforcement should be enabled at the daemon level.
+ if lbls.Has(labels.IDNameHost) && !option.Config.EnableHostFirewall {
+ return false, false, nil
+ }
+ switch GetPolicyEnabled() {
+ case option.AlwaysEnforce:
+ _, _, matchingRules = p.getMatchingRules(securityIdentity)
+ // If policy enforcement is enabled for the daemon, then it has to be
+ // enabled for the endpoint.
+ return true, true, matchingRules
+ case option.DefaultEnforcement:
+ ingress, egress, matchingRules = p.getMatchingRules(securityIdentity)
+ // If the endpoint has the reserved:init label, i.e. if it has not yet
+ // received any labels, always enforce policy (default deny).
+ if lbls.Has(labels.IDNameInit) {
+ return true, true, matchingRules
+ }
+
+ // Default mode means that if rules contain labels that match this
+ // endpoint, then enable policy enforcement for this endpoint.
+ return ingress, egress, matchingRules
+ default:
+ // If policy enforcement isn't enabled, we do not enable policy
+ // enforcement for the endpoint. We don't care about returning any
+ // rules that match.
+ return false, false, nil
+ }
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/resolve.go b/vendor/github.com/cilium/cilium/pkg/policy/resolve.go
new file mode 100644
index 000000000..a6abf95ce
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/resolve.go
@@ -0,0 +1,297 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package policy
+
+import (
+ "github.com/sirupsen/logrus"
+
+ "github.com/cilium/cilium/pkg/identity"
+ "github.com/cilium/cilium/pkg/policy/trafficdirection"
+)
+
+// selectorPolicy is a structure which contains the resolved policy for a
+// particular Identity across all layers (L3, L4, and L7), with the policy
+// still determined in terms of EndpointSelectors.
+type selectorPolicy struct {
+ // Revision is the revision of the policy repository used to generate
+ // this selectorPolicy.
+ Revision uint64
+
+ // SelectorCache managing selectors in L4Policy
+ SelectorCache *SelectorCache
+
+ // L4Policy contains the computed L4 and L7 policy.
+ L4Policy L4Policy
+
+ // IngressPolicyEnabled specifies whether this policy contains any policy
+ // at ingress.
+ IngressPolicyEnabled bool
+
+ // EgressPolicyEnabled specifies whether this policy contains any policy
+ // at egress.
+ EgressPolicyEnabled bool
+}
+
+func (p *selectorPolicy) Attach(ctx PolicyContext) {
+ p.L4Policy.Attach(ctx)
+}
+
+// EndpointPolicy is a structure which contains the resolved policy across all
+// layers (L3, L4, and L7), distilled against a set of identities.
+type EndpointPolicy struct {
+ // Note that all Endpoints sharing the same identity will be
+ // referring to a shared selectorPolicy!
+ *selectorPolicy
+
+ // policyMapState contains the state of this policy as it relates to the
+ // datapath. In the future, this will be factored out of this object to
+ // decouple the policy as it relates to the datapath vs. its userspace
+ // representation.
+ // It maps each Key to the proxy port if proxy redirection is needed.
+ // Proxy port 0 indicates no proxy redirection.
+ // All fields within the Key and the proxy port must be in host byte-order.
+ // Must only be accessed with PolicyOwner (aka Endpoint) lock taken.
+ policyMapState MapState
+
+ // policyMapChanges collects pending changes to the PolicyMapState
+ policyMapChanges MapChanges
+
+ // PolicyOwner describes any type which consumes this EndpointPolicy object.
+ PolicyOwner PolicyOwner
+}
+
+// PolicyOwner is anything which consumes a EndpointPolicy.
+type PolicyOwner interface {
+ GetID() uint64
+ LookupRedirectPortLocked(ingress bool, protocol string, port uint16) uint16
+ HasBPFPolicyMap() bool
+ GetNamedPort(ingress bool, name string, proto uint8) uint16
+ PolicyDebug(fields logrus.Fields, msg string)
+}
+
+// newSelectorPolicy returns an empty selectorPolicy stub.
+func newSelectorPolicy(selectorCache *SelectorCache) *selectorPolicy {
+ return &selectorPolicy{
+ Revision: 0,
+ SelectorCache: selectorCache,
+ L4Policy: NewL4Policy(0),
+ }
+}
+
+// insertUser adds a user to the L4Policy so that incremental
+// updates of the L4Policy may be forwarded.
+func (p *selectorPolicy) insertUser(user *EndpointPolicy) {
+ p.L4Policy.insertUser(user)
+}
+
+// removeUser removes a user from the L4Policy so the EndpointPolicy
+// can be freed when not needed any more
+func (p *selectorPolicy) removeUser(user *EndpointPolicy) {
+ p.L4Policy.removeUser(user)
+}
+
+// Detach releases resources held by a selectorPolicy to enable
+// successful eventual GC. Note that the selectorPolicy itself is not
+// modified in any way, so that it can be used concurrently.
+func (p *selectorPolicy) Detach() {
+ p.L4Policy.Detach(p.SelectorCache)
+}
+
+// DistillPolicy filters down the specified selectorPolicy (which acts
+// upon selectors) into a set of concrete map entries based on the
+// SelectorCache. These can subsequently be plumbed into the datapath.
+//
+// Must be performed while holding the Repository lock.
+// PolicyOwner (aka Endpoint) is also locked during this call.
+func (p *selectorPolicy) DistillPolicy(policyOwner PolicyOwner, isHost bool) *EndpointPolicy {
+ calculatedPolicy := &EndpointPolicy{
+ selectorPolicy: p,
+ policyMapState: NewMapState(nil),
+ PolicyOwner: policyOwner,
+ }
+
+ if !p.IngressPolicyEnabled || !p.EgressPolicyEnabled {
+ calculatedPolicy.policyMapState.allowAllIdentities(
+ !p.IngressPolicyEnabled, !p.EgressPolicyEnabled)
+ }
+
+ // Register the new EndpointPolicy as a receiver of delta
+ // updates. Any updates happening after this, but before
+ // computeDesiredL4PolicyMapEntries() call finishes may
+ // already be applied to the PolicyMapState, specifically:
+ //
+ // - policyMapChanges may contain an addition of an entry that
+ // is already added to the PolicyMapState
+ //
+ // - policyMapChanges may contain a deletion of an entry that
+ // has already been deleted from PolicyMapState
+ p.insertUser(calculatedPolicy)
+
+ // Must come after the 'insertUser()' above to guarantee
+ // PolicyMapChanges will contain all changes that are applied
+ // after the computation of PolicyMapState has started.
+ p.SelectorCache.mutex.RLock()
+ calculatedPolicy.toMapState()
+ if !isHost {
+ calculatedPolicy.policyMapState.determineAllowLocalhostIngress()
+ }
+ p.SelectorCache.mutex.RUnlock()
+
+ return calculatedPolicy
+}
+
+// GetPolicyMap gets the policy map state as the interface
+// MapState
+func (p *EndpointPolicy) GetPolicyMap() MapState {
+ return p.policyMapState
+}
+
+// SetPolicyMap sets the policy map state as the interface
+// MapState. If the main argument is nil, then this method
+// will initialize a new MapState object for the caller.
+func (p *EndpointPolicy) SetPolicyMap(ms MapState) {
+ if ms == nil {
+ p.policyMapState = NewMapState(nil)
+ return
+ }
+ p.policyMapState = ms
+}
+
+// Detach removes EndpointPolicy references from selectorPolicy
+// to allow the EndpointPolicy to be GC'd.
+// PolicyOwner (aka Endpoint) is also locked during this call.
+func (p *EndpointPolicy) Detach() {
+ p.selectorPolicy.removeUser(p)
+}
+
+// toMapState transforms the EndpointPolicy.L4Policy into
+// the datapath-friendly format inside EndpointPolicy.PolicyMapState.
+// Called with selectorcache locked for reading
+func (p *EndpointPolicy) toMapState() {
+ p.L4Policy.Ingress.toMapState(p)
+ p.L4Policy.Egress.toMapState(p)
+}
+
+// Called with selectorcache locked for reading
+func (l4policy L4DirectionPolicy) toMapState(p *EndpointPolicy) {
+ for _, l4 := range l4policy.PortRules {
+ lookupDone := false
+ proxyport := uint16(0)
+ l4.toMapState(p, l4policy.features, func(keyFromFilter Key, entry *MapStateEntry) bool {
+ // Fix up the proxy port for entries that need proxy redirection
+ if entry.IsRedirectEntry() {
+ if !lookupDone {
+ // only lookup once for each filter
+ // Use 'destPort' from the key as it is already resolved
+ // from a named port if needed.
+ proxyport = p.PolicyOwner.LookupRedirectPortLocked(l4.Ingress, string(l4.Protocol), keyFromFilter.DestPort)
+ lookupDone = true
+ }
+ entry.ProxyPort = proxyport
+ // If the currently allocated proxy port is 0, this is a new
+ // redirect, for which no port has been allocated yet. Ignore
+ // it for now. This will be configured by
+ // UpdateRedirects() once the port has been allocated.
+ if !entry.IsRedirectEntry() {
+ return false
+ }
+ }
+ return true
+ }, ChangeState{
+ Old: newMapState(nil),
+ })
+ }
+}
+
+type getProxyPortFunc func(*L4Filter) (proxyPort uint16, ok bool)
+
+// UpdateRedirects updates redirects in the EndpointPolicy's PolicyMapState by using the provided
+// function to obtain a proxy port number to use. Changes to 'p.PolicyMapState' are collected in
+// 'adds' and 'updated' so that they can be reverted when needed.
+func (p *EndpointPolicy) UpdateRedirects(ingress bool, getProxyPort getProxyPortFunc, changes ChangeState) {
+ l4policy := &p.L4Policy.Ingress
+ if !ingress {
+ l4policy = &p.L4Policy.Egress
+ }
+
+ l4policy.updateRedirects(p, getProxyPort, changes)
+}
+
+func (l4policy L4DirectionPolicy) updateRedirects(p *EndpointPolicy, getProxyPort getProxyPortFunc, changes ChangeState) {
+ // Selectorcache needs to be locked for toMapState (GetLabels()) call
+ p.SelectorCache.mutex.RLock()
+ defer p.SelectorCache.mutex.RUnlock()
+
+ for _, l4 := range l4policy.PortRules {
+ if l4.IsRedirect() {
+ // Check if we are denying this specific L4 first regardless the L3, if there are any deny policies
+ if l4policy.features.contains(denyRules) && p.policyMapState.deniesL4(p.PolicyOwner, l4) {
+ continue
+ }
+
+ redirectPort, ok := getProxyPort(l4)
+ if !ok {
+ continue
+ }
+
+ // Set the proxy port in the policy map.
+ l4.toMapState(p, l4policy.features, func(_ Key, entry *MapStateEntry) bool {
+ if entry.IsRedirectEntry() {
+ entry.ProxyPort = redirectPort
+ }
+ return true
+ }, changes)
+ }
+ }
+}
+
+// ConsumeMapChanges transfers the changes from MapChanges to the caller,
+// locking the selector cache to make sure concurrent identity updates
+// have completed.
+// PolicyOwner (aka Endpoint) is also locked during this call.
+func (p *EndpointPolicy) ConsumeMapChanges() (adds, deletes Keys) {
+ p.selectorPolicy.SelectorCache.mutex.Lock()
+ defer p.selectorPolicy.SelectorCache.mutex.Unlock()
+ features := p.selectorPolicy.L4Policy.Ingress.features | p.selectorPolicy.L4Policy.Egress.features
+ return p.policyMapChanges.consumeMapChanges(p.policyMapState, features, p.SelectorCache)
+}
+
+// AllowsIdentity returns whether the specified policy allows
+// ingress and egress traffic for the specified numeric security identity.
+// If the 'secID' is zero, it will check if all traffic is allowed.
+//
+// Returning true for either return value indicates all traffic is allowed.
+func (p *EndpointPolicy) AllowsIdentity(identity identity.NumericIdentity) (ingress, egress bool) {
+ key := Key{
+ Identity: uint32(identity),
+ }
+
+ if !p.IngressPolicyEnabled {
+ ingress = true
+ } else {
+ key.TrafficDirection = trafficdirection.Ingress.Uint8()
+ if v, exists := p.policyMapState.Get(key); exists && !v.IsDeny {
+ ingress = true
+ }
+ }
+
+ if !p.EgressPolicyEnabled {
+ egress = true
+ } else {
+ key.TrafficDirection = trafficdirection.Egress.Uint8()
+ if v, exists := p.policyMapState.Get(key); exists && !v.IsDeny {
+ egress = true
+ }
+ }
+
+ return ingress, egress
+}
+
+// NewEndpointPolicy returns an empty EndpointPolicy stub.
+func NewEndpointPolicy(repo *Repository) *EndpointPolicy {
+ return &EndpointPolicy{
+ selectorPolicy: newSelectorPolicy(repo.GetSelectorCache()),
+ policyMapState: NewMapState(nil),
+ }
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/rule.go b/vendor/github.com/cilium/cilium/pkg/policy/rule.go
new file mode 100644
index 000000000..b636e4a7d
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/rule.go
@@ -0,0 +1,790 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package policy
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/cilium/proxy/pkg/policy/api/kafka"
+
+ "github.com/cilium/cilium/pkg/identity"
+ slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
+ "github.com/cilium/cilium/pkg/labels"
+ "github.com/cilium/cilium/pkg/lock"
+ "github.com/cilium/cilium/pkg/option"
+ "github.com/cilium/cilium/pkg/policy/api"
+)
+
+type rule struct {
+ api.Rule
+
+ metadata *ruleMetadata
+}
+
+type ruleMetadata struct {
+ // mutex protects all fields in this type.
+ Mutex lock.Mutex
+
+ // IdentitySelected is a cache that maps from an identity to whether
+ // this rule selects that identity.
+ IdentitySelected map[identity.NumericIdentity]bool
+}
+
+func newRuleMetadata() *ruleMetadata {
+ return &ruleMetadata{
+ IdentitySelected: make(map[identity.NumericIdentity]bool),
+ }
+}
+
+func (m *ruleMetadata) delete(identity *identity.Identity) {
+ m.Mutex.Lock()
+ defer m.Mutex.Unlock()
+ delete(m.IdentitySelected, identity.ID)
+}
+
+func (r *rule) String() string {
+ return r.EndpointSelector.String()
+}
+
+func (r *rule) getSelector() *api.EndpointSelector {
+ if r.NodeSelector.LabelSelector != nil {
+ return &r.NodeSelector
+ }
+ return &r.EndpointSelector
+}
+
+func (epd *PerSelectorPolicy) appendL7WildcardRule(ctx *SearchContext) api.L7Rules {
+ // Wildcard rule only needs to be appended if some rules already exist
+ switch {
+ case len(epd.L7Rules.HTTP) > 0:
+ rule := api.PortRuleHTTP{}
+ if !rule.Exists(epd.L7Rules) {
+ ctx.PolicyTrace(" Merging HTTP wildcard rule: %+v\n", rule)
+ epd.L7Rules.HTTP = append(epd.L7Rules.HTTP, rule)
+ } else {
+ ctx.PolicyTrace(" Merging HTTP wildcard rule, equal rule already exists: %+v\n", rule)
+ }
+ case len(epd.L7Rules.Kafka) > 0:
+ rule := kafka.PortRule{}
+ rule.Sanitize()
+ if !rule.Exists(epd.L7Rules.Kafka) {
+ ctx.PolicyTrace(" Merging Kafka wildcard rule: %+v\n", rule)
+ epd.L7Rules.Kafka = append(epd.L7Rules.Kafka, rule)
+ } else {
+ ctx.PolicyTrace(" Merging Kafka wildcard rule, equal rule already exists: %+v\n", rule)
+ }
+ case len(epd.L7Rules.DNS) > 0:
+ // Wildcarding at L7 for DNS is specified via allowing all via
+ // MatchPattern!
+ rule := api.PortRuleDNS{MatchPattern: "*"}
+ rule.Sanitize()
+ if !rule.Exists(epd.L7Rules) {
+ ctx.PolicyTrace(" Merging DNS wildcard rule: %+v\n", rule)
+ epd.L7Rules.DNS = append(epd.L7Rules.DNS, rule)
+ } else {
+ ctx.PolicyTrace(" Merging DNS wildcard rule, equal rule already exists: %+v\n", rule)
+ }
+ case epd.L7Rules.L7Proto != "" && len(epd.L7Rules.L7) > 0:
+ rule := api.PortRuleL7{}
+ if !rule.Exists(epd.L7Rules) {
+ ctx.PolicyTrace(" Merging L7 wildcard rule: %+v\n", rule)
+ epd.L7Rules.L7 = append(epd.L7Rules.L7, rule)
+ } else {
+ ctx.PolicyTrace(" Merging L7 wildcard rule, equal rule already exists: %+v\n", rule)
+ }
+ }
+ return epd.L7Rules
+}
+
+func mergePortProto(ctx *SearchContext, existingFilter, filterToMerge *L4Filter, selectorCache *SelectorCache) (err error) {
+ // Merge the L7-related data from the filter to merge
+ // with the L7-related data already in the existing filter.
+ existingFilter.L7Parser, err = existingFilter.L7Parser.Merge(filterToMerge.L7Parser)
+ if err != nil {
+ ctx.PolicyTrace(" Merge conflict: mismatching parsers %s/%s\n", filterToMerge.L7Parser, existingFilter.L7Parser)
+ return err
+ }
+
+ if existingFilter.Listener == "" || filterToMerge.Listener == "" {
+ if filterToMerge.Listener != "" {
+ existingFilter.Listener = filterToMerge.Listener
+ }
+ } else if filterToMerge.Listener != existingFilter.Listener {
+ ctx.PolicyTrace(" Merge conflict: mismatching CiliumEnvoyConfig listeners %v/%v\n", filterToMerge.Listener, existingFilter.Listener)
+ return fmt.Errorf("cannot merge conflicting CiliumEnvoyConfig Listeners (%v/%v)", filterToMerge.Listener, existingFilter.Listener)
+ }
+
+ for cs, newL7Rules := range filterToMerge.PerSelectorPolicies {
+ // 'cs' will be merged or moved (see below), either way it needs
+ // to be removed from the map it is in now.
+ delete(filterToMerge.PerSelectorPolicies, cs)
+
+ if l7Rules, ok := existingFilter.PerSelectorPolicies[cs]; ok {
+ // existing filter already has 'cs', release and merge L7 rules
+ selectorCache.RemoveSelector(cs, filterToMerge)
+
+ // skip merging for reserved:none, as it is never
+ // selected, and toFQDN rules currently translate to
+ // reserved:none as an endpoint selector, causing a
+ // merge conflict for different toFQDN destinations
+ // with different TLS contexts.
+ if cs.IsNone() {
+ continue
+ }
+
+ if l7Rules.Equal(newL7Rules) {
+ continue // identical rules need no merging
+ }
+
+ // Merge two non-identical sets of non-nil rules
+ if l7Rules != nil && l7Rules.IsDeny {
+ // If existing rule is deny then it's a no-op
+ // Denies takes priority over any rule.
+ continue
+ } else if newL7Rules != nil && newL7Rules.IsDeny {
+ // Overwrite existing filter if the new rule is a deny case
+ // Denies takes priority over any rule.
+ existingFilter.PerSelectorPolicies[cs] = newL7Rules
+ continue
+ }
+
+ // One of the rules may be a nil rule, expand it to an empty non-nil rule
+ if l7Rules == nil {
+ l7Rules = &PerSelectorPolicy{}
+ }
+ if newL7Rules == nil {
+ newL7Rules = &PerSelectorPolicy{}
+ }
+
+ // Merge isRedirect flag
+ l7Rules.isRedirect = l7Rules.isRedirect || newL7Rules.isRedirect
+
+ if l7Rules.Authentication == nil || newL7Rules.Authentication == nil {
+ if newL7Rules.Authentication != nil {
+ l7Rules.Authentication = newL7Rules.Authentication
+ }
+ } else if !newL7Rules.Authentication.DeepEqual(l7Rules.Authentication) {
+ ctx.PolicyTrace(" Merge conflict: mismatching auth types %s/%s\n", newL7Rules.Authentication.Mode, l7Rules.Authentication.Mode)
+ return fmt.Errorf("cannot merge conflicting authentication types (%s/%s)", newL7Rules.Authentication.Mode, l7Rules.Authentication.Mode)
+ }
+
+ if l7Rules.TerminatingTLS == nil || newL7Rules.TerminatingTLS == nil {
+ if newL7Rules.TerminatingTLS != nil {
+ l7Rules.TerminatingTLS = newL7Rules.TerminatingTLS
+ }
+ } else if !newL7Rules.TerminatingTLS.Equal(l7Rules.TerminatingTLS) {
+ ctx.PolicyTrace(" Merge conflict: mismatching terminating TLS contexts %v/%v\n", newL7Rules.TerminatingTLS, l7Rules.TerminatingTLS)
+ return fmt.Errorf("cannot merge conflicting terminating TLS contexts for cached selector %s: (%v/%v)", cs.String(), newL7Rules.TerminatingTLS, l7Rules.TerminatingTLS)
+ }
+ if l7Rules.OriginatingTLS == nil || newL7Rules.OriginatingTLS == nil {
+ if newL7Rules.OriginatingTLS != nil {
+ l7Rules.OriginatingTLS = newL7Rules.OriginatingTLS
+ }
+ } else if !newL7Rules.OriginatingTLS.Equal(l7Rules.OriginatingTLS) {
+ ctx.PolicyTrace(" Merge conflict: mismatching originating TLS contexts %v/%v\n", newL7Rules.OriginatingTLS, l7Rules.OriginatingTLS)
+ return fmt.Errorf("cannot merge conflicting originating TLS contexts for cached selector %s: (%v/%v)", cs.String(), newL7Rules.OriginatingTLS, l7Rules.OriginatingTLS)
+ }
+
+ // For now we simply merge the set of allowed SNIs from different rules
+ // to/from the *same remote*, port, and protocol. This means that if any
+ // rule requires SNI, then all traffic to that remote/port requires TLS,
+ // even if other merged rules would be fine without TLS. Any SNI from all
+ // applicable rules is allowed.
+ //
+ // Preferably we could allow different rules for each SNI, but for now the
+ // combination of all L7 rules is allowed for all the SNIs. For example, if
+ // SNI and TLS termination are used together so that L7 filtering is
+ // possible, in this example:
+ //
+ // - existing: SNI: public.example.com
+ // - new: SNI: private.example.com HTTP: path="/public"
+ //
+ // Separately, these rule allow access to all paths at SNI
+ // public.example.com and path private.example.com/public, but currently we
+ // allow all paths also at private.example.com. This may be clamped down if
+ // there is sufficient demand for SNI and TLS termination together.
+ //
+ // Note however that SNI rules are typically used with `toFQDNs`, each of
+ // which defines a separate destination, so that SNIs for different
+ // `toFQDNs` will not be merged together.
+ l7Rules.ServerNames = l7Rules.ServerNames.Merge(newL7Rules.ServerNames)
+
+ // L7 rules can be applied with SNI filtering only if the TLS is also
+ // terminated
+ if len(l7Rules.ServerNames) > 0 && !l7Rules.L7Rules.IsEmpty() && l7Rules.TerminatingTLS == nil {
+ ctx.PolicyTrace(" Merge conflict: cannot use SNI filtering with L7 rules without TLS termination: %v\n", l7Rules.ServerNames)
+ return fmt.Errorf("cannot merge L7 rules for cached selector %s with SNI filtering without TLS termination: %v", cs.String(), l7Rules.ServerNames)
+ }
+
+ // empty L7 rules effectively wildcard L7. When merging with a non-empty
+ // rule, the empty must be expanded to an actual wildcard rule for the
+ // specific L7
+ if !l7Rules.HasL7Rules() && newL7Rules.HasL7Rules() {
+ l7Rules.L7Rules = newL7Rules.appendL7WildcardRule(ctx)
+ existingFilter.PerSelectorPolicies[cs] = l7Rules
+ continue
+ }
+ if l7Rules.HasL7Rules() && !newL7Rules.HasL7Rules() {
+ l7Rules.appendL7WildcardRule(ctx)
+ existingFilter.PerSelectorPolicies[cs] = l7Rules
+ continue
+ }
+
+ // We already know from the L7Parser.Merge() above that there are no
+ // conflicting parser types, and rule validation only allows one type of L7
+ // rules in a rule, so we can just merge the rules here.
+ for _, newRule := range newL7Rules.HTTP {
+ if !newRule.Exists(l7Rules.L7Rules) {
+ l7Rules.HTTP = append(l7Rules.HTTP, newRule)
+ }
+ }
+ for _, newRule := range newL7Rules.Kafka {
+ if !newRule.Exists(l7Rules.L7Rules.Kafka) {
+ l7Rules.Kafka = append(l7Rules.Kafka, newRule)
+ }
+ }
+ if l7Rules.L7Proto == "" && newL7Rules.L7Proto != "" {
+ l7Rules.L7Proto = newL7Rules.L7Proto
+ }
+ for _, newRule := range newL7Rules.L7 {
+ if !newRule.Exists(l7Rules.L7Rules) {
+ l7Rules.L7 = append(l7Rules.L7, newRule)
+ }
+ }
+ for _, newRule := range newL7Rules.DNS {
+ if !newRule.Exists(l7Rules.L7Rules) {
+ l7Rules.DNS = append(l7Rules.DNS, newRule)
+ }
+ }
+ // Update the pointer in the map in case it was newly allocated
+ existingFilter.PerSelectorPolicies[cs] = l7Rules
+ } else { // 'cs' is not in the existing filter yet
+ // Update selector owner to the existing filter
+ selectorCache.ChangeUser(cs, filterToMerge, existingFilter)
+
+ // Move L7 rules over.
+ existingFilter.PerSelectorPolicies[cs] = newL7Rules
+
+ if cs.IsWildcard() {
+ existingFilter.wildcard = cs
+ }
+ }
+ }
+
+ return nil
+}
+
+// mergeIngressPortProto merges all rules which share the same port & protocol that
+// select a given set of endpoints. It updates the L4Filter mapped to by the specified
+// port and protocol with the contents of the provided PortRule. If the rule
+// being merged has conflicting L7 rules with those already in the provided
+// L4PolicyMap for the specified port-protocol tuple, it returns an error.
+//
+// If any rules contain L7 rules that select Host or Remote Node and we should
+// accept all traffic from host, the L7 rules will be translated into L7
+// wildcards via 'hostWildcardL7'. That is to say, traffic will be
+// forwarded to the proxy for endpoints matching those labels, but the proxy
+// will allow all such traffic.
+func mergeIngressPortProto(policyCtx PolicyContext, ctx *SearchContext, endpoints api.EndpointSelectorSlice, auth *api.Authentication, hostWildcardL7 []string,
+ r api.Ports, p api.PortProtocol, proto api.L4Proto, ruleLabels labels.LabelArray, resMap L4PolicyMap) (int, error) {
+ // Create a new L4Filter
+ filterToMerge, err := createL4IngressFilter(policyCtx, endpoints, auth, hostWildcardL7, r, p, proto, ruleLabels)
+ if err != nil {
+ return 0, err
+ }
+
+ err = addL4Filter(policyCtx, ctx, resMap, p, proto, filterToMerge, ruleLabels)
+ if err != nil {
+ return 0, err
+ }
+ return 1, err
+}
+
+func traceL3(ctx *SearchContext, peerEndpoints api.EndpointSelectorSlice, direction string, isDeny bool) {
+ var result strings.Builder
+
+ // Requirements will be cloned into every selector, only trace them once.
+ if len(peerEndpoints[0].MatchExpressions) > 0 {
+ sel := peerEndpoints[0]
+ result.WriteString(" Enforcing requirements ")
+ result.WriteString(fmt.Sprintf("%+v", sel.MatchExpressions))
+ result.WriteString("\n")
+ }
+ // EndpointSelector
+ for _, sel := range peerEndpoints {
+ if len(sel.MatchLabels) > 0 {
+ if !isDeny {
+ result.WriteString(" Allows ")
+ } else {
+ result.WriteString(" Denies ")
+ }
+ result.WriteString(direction)
+ result.WriteString(" labels ")
+ result.WriteString(sel.String())
+ result.WriteString("\n")
+ }
+ }
+ ctx.PolicyTrace(result.String())
+}
+
+// portRulesCoverContext determines whether L4 portions of rules cover the
+// specified port models.
+//
+// Returns true if the list of ports is 0, or the rules match the ports.
+func rulePortsCoverSearchContext(ports []api.PortProtocol, ctx *SearchContext) bool {
+ if len(ctx.DPorts) == 0 {
+ return true
+ }
+ for _, p := range ports {
+ for _, dp := range ctx.DPorts {
+ tracePort := api.PortProtocol{
+ Protocol: api.L4Proto(dp.Protocol),
+ }
+ if dp.Name != "" {
+ tracePort.Port = dp.Name
+ } else {
+ tracePort.Port = strconv.FormatUint(uint64(dp.Port), 10)
+ }
+ if p.Covers(tracePort) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+func mergeIngress(policyCtx PolicyContext, ctx *SearchContext, fromEndpoints api.EndpointSelectorSlice, auth *api.Authentication, toPorts, icmp api.PortsIterator, ruleLabels labels.LabelArray, resMap L4PolicyMap) (int, error) {
+ found := 0
+
+ if ctx.From != nil && len(fromEndpoints) > 0 {
+ if ctx.TraceEnabled() {
+ traceL3(ctx, fromEndpoints, "from", policyCtx.IsDeny())
+ }
+ if !fromEndpoints.Matches(ctx.From) {
+ ctx.PolicyTrace(" No label match for %s", ctx.From)
+ return 0, nil
+ }
+ ctx.PolicyTrace(" Found all required labels")
+ }
+
+ // Daemon options may induce L3 allows for host/world. In this case, if
+ // we find any L7 rules matching host/world then we need to turn any L7
+ // restrictions on these endpoints into L7 allow-all so that the
+ // traffic is always allowed, but is also always redirected through the
+ // proxy
+ hostWildcardL7 := make([]string, 0, 2)
+ if option.Config.AlwaysAllowLocalhost() {
+ hostWildcardL7 = append(hostWildcardL7, labels.IDNameHost)
+ if !option.Config.EnableRemoteNodeIdentity {
+ hostWildcardL7 = append(hostWildcardL7, labels.IDNameRemoteNode)
+ }
+ }
+
+ var (
+ cnt int
+ err error
+ )
+
+ // L3-only rule (with requirements folded into fromEndpoints).
+ if toPorts.Len() == 0 && icmp.Len() == 0 && len(fromEndpoints) > 0 {
+ cnt, err = mergeIngressPortProto(policyCtx, ctx, fromEndpoints, auth, hostWildcardL7, &api.PortRule{}, api.PortProtocol{Port: "0", Protocol: api.ProtoAny}, api.ProtoAny, ruleLabels, resMap)
+ if err != nil {
+ return found, err
+ }
+ }
+
+ found += cnt
+
+ err = toPorts.Iterate(func(r api.Ports) error {
+ // For L4 Policy, an empty slice of EndpointSelector indicates that the
+ // rule allows all at L3 - explicitly specify this by creating a slice
+ // with the WildcardEndpointSelector.
+ if len(fromEndpoints) == 0 {
+ fromEndpoints = api.EndpointSelectorSlice{api.WildcardEndpointSelector}
+ }
+ if !policyCtx.IsDeny() {
+ ctx.PolicyTrace(" Allows port %v\n", r.GetPortProtocols())
+ } else {
+ ctx.PolicyTrace(" Denies port %v\n", r.GetPortProtocols())
+ }
+ if !rulePortsCoverSearchContext(r.GetPortProtocols(), ctx) {
+ ctx.PolicyTrace(" No port match found\n")
+ return nil
+ }
+ pr := r.GetPortRule()
+ if pr != nil {
+ if pr.Rules != nil && pr.Rules.L7Proto != "" {
+ ctx.PolicyTrace(" l7proto: \"%s\"\n", pr.Rules.L7Proto)
+ }
+ if !pr.Rules.IsEmpty() {
+ for _, l7 := range pr.Rules.HTTP {
+ ctx.PolicyTrace(" %+v\n", l7)
+ }
+ for _, l7 := range pr.Rules.Kafka {
+ ctx.PolicyTrace(" %+v\n", l7)
+ }
+ for _, l7 := range pr.Rules.L7 {
+ ctx.PolicyTrace(" %+v\n", l7)
+ }
+ }
+ }
+
+ for _, p := range r.GetPortProtocols() {
+ if p.Protocol != api.ProtoAny {
+ cnt, err := mergeIngressPortProto(policyCtx, ctx, fromEndpoints, auth, hostWildcardL7, r, p, p.Protocol, ruleLabels, resMap)
+ if err != nil {
+ return err
+ }
+ found += cnt
+ } else {
+ cnt, err := mergeIngressPortProto(policyCtx, ctx, fromEndpoints, auth, hostWildcardL7, r, p, api.ProtoTCP, ruleLabels, resMap)
+ if err != nil {
+ return err
+ }
+ found += cnt
+
+ cnt, err = mergeIngressPortProto(policyCtx, ctx, fromEndpoints, auth, hostWildcardL7, r, p, api.ProtoUDP, ruleLabels, resMap)
+ if err != nil {
+ return err
+ }
+ found += cnt
+
+ cnt, err = mergeIngressPortProto(policyCtx, ctx, fromEndpoints, auth, hostWildcardL7, r, p, api.ProtoSCTP, ruleLabels, resMap)
+ if err != nil {
+ return err
+ }
+ found += cnt
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return found, err
+ }
+
+ err = icmp.Iterate(func(r api.Ports) error {
+ if len(fromEndpoints) == 0 {
+ fromEndpoints = api.EndpointSelectorSlice{api.WildcardEndpointSelector}
+ }
+ if !policyCtx.IsDeny() {
+ ctx.PolicyTrace(" Allows ICMP type %v\n", r.GetPortProtocols())
+ } else {
+ ctx.PolicyTrace(" Denies ICMP type %v\n", r.GetPortProtocols())
+ }
+ if !rulePortsCoverSearchContext(r.GetPortProtocols(), ctx) {
+ ctx.PolicyTrace(" No ICMP type match found\n")
+ return nil
+ }
+
+ for _, p := range r.GetPortProtocols() {
+ cnt, err := mergeIngressPortProto(policyCtx, ctx, fromEndpoints, auth, hostWildcardL7, r, p, p.Protocol, ruleLabels, resMap)
+ if err != nil {
+ return err
+ }
+ found += cnt
+ }
+ return nil
+ })
+
+ return found, err
+}
+
+func (state *traceState) selectRule(ctx *SearchContext, r *rule) {
+ ctx.PolicyTrace("* Rule %s: selected\n", r)
+ state.selectedRules++
+}
+
+func (state *traceState) unSelectRule(ctx *SearchContext, labels labels.LabelArray, r *rule) {
+ ctx.PolicyTraceVerbose(" Rule %s: did not select %+v\n", r, labels)
+}
+
+// resolveIngressPolicy analyzes the rule against the given SearchContext, and
+// merges it with any prior-generated policy within the provided L4Policy.
+// Requirements based off of all Ingress requirements (set in FromRequires) in
+// other rules are stored in the specified slice of LabelSelectorRequirement.
+// These requirements are dynamically inserted into a copy of the receiver rule,
+// as requirements form conjunctions across all rules.
+func (r *rule) resolveIngressPolicy(
+ policyCtx PolicyContext,
+ ctx *SearchContext,
+ state *traceState,
+ result L4PolicyMap,
+ requirements, requirementsDeny []slim_metav1.LabelSelectorRequirement,
+) (
+ L4PolicyMap, error,
+) {
+ if !ctx.rulesSelect {
+ if !r.getSelector().Matches(ctx.To) {
+ state.unSelectRule(ctx, ctx.To, r)
+ return nil, nil
+ }
+ }
+
+ state.selectRule(ctx, r)
+ found, foundDeny := 0, 0
+
+ if len(r.Ingress) == 0 && len(r.IngressDeny) == 0 {
+ ctx.PolicyTrace(" No ingress rules\n")
+ }
+ for _, ingressRule := range r.Ingress {
+ fromEndpoints := ingressRule.GetSourceEndpointSelectorsWithRequirements(requirements)
+ cnt, err := mergeIngress(policyCtx, ctx, fromEndpoints, ingressRule.Authentication, ingressRule.ToPorts, ingressRule.ICMPs, r.Rule.Labels.DeepCopy(), result)
+ if err != nil {
+ return nil, err
+ }
+ if cnt > 0 {
+ found += cnt
+ }
+ }
+
+ oldDeny := policyCtx.SetDeny(true)
+ defer func() {
+ policyCtx.SetDeny(oldDeny)
+ }()
+ for _, ingressRule := range r.IngressDeny {
+ fromEndpoints := ingressRule.GetSourceEndpointSelectorsWithRequirements(requirementsDeny)
+ cnt, err := mergeIngress(policyCtx, ctx, fromEndpoints, nil, ingressRule.ToPorts, ingressRule.ICMPs, r.Rule.Labels.DeepCopy(), result)
+ if err != nil {
+ return nil, err
+ }
+ if cnt > 0 {
+ foundDeny += cnt
+ }
+ }
+
+ if found+foundDeny > 0 {
+ if found != 0 {
+ state.matchedRules++
+ }
+ if foundDeny != 0 {
+ state.matchedDenyRules++
+ }
+ return result, nil
+ }
+
+ return nil, nil
+}
+
+func (r *rule) matches(securityIdentity *identity.Identity) bool {
+ r.metadata.Mutex.Lock()
+ defer r.metadata.Mutex.Unlock()
+ var ruleMatches bool
+
+ if ruleMatches, cached := r.metadata.IdentitySelected[securityIdentity.ID]; cached {
+ return ruleMatches
+ }
+ isNode := securityIdentity.ID == identity.ReservedIdentityHost
+ if (r.NodeSelector.LabelSelector != nil) != isNode {
+ r.metadata.IdentitySelected[securityIdentity.ID] = false
+ return ruleMatches
+ }
+ // Fall back to costly matching.
+ if ruleMatches = r.getSelector().Matches(securityIdentity.LabelArray); ruleMatches {
+ // Update cache so we don't have to do costly matching again.
+ r.metadata.IdentitySelected[securityIdentity.ID] = true
+ } else {
+ r.metadata.IdentitySelected[securityIdentity.ID] = false
+ }
+
+ return ruleMatches
+}
+
+// ****************** EGRESS POLICY ******************
+
+// mergeEgress merges one egress rule fragment — L3 destination selectors plus
+// the associated L4 port rules, ICMP fields and FQDN selectors — into resMap.
+// It returns the number of L4 filters merged, or an error if a filter
+// conflicts with one already present in resMap.
+func mergeEgress(policyCtx PolicyContext, ctx *SearchContext, toEndpoints api.EndpointSelectorSlice, auth *api.Authentication, toPorts, icmp api.PortsIterator, ruleLabels labels.LabelArray, resMap L4PolicyMap, fqdns api.FQDNSelectorSlice) (int, error) {
+	found := 0
+
+	// If the search context carries a destination, skip this rule entirely
+	// when none of its destination selectors match it.
+	if ctx.To != nil && len(toEndpoints) > 0 {
+		if ctx.TraceEnabled() {
+			traceL3(ctx, toEndpoints, "to", policyCtx.IsDeny())
+		}
+		if !toEndpoints.Matches(ctx.To) {
+			ctx.PolicyTrace("      No label match for %s", ctx.To)
+			return 0, nil
+		}
+		ctx.PolicyTrace("      Found all required labels")
+	}
+
+	var (
+		cnt int
+		err error
+	)
+
+	// L3-only rule (with requirements folded into toEndpoints).
+	if toPorts.Len() == 0 && icmp.Len() == 0 && len(toEndpoints) > 0 {
+		cnt, err = mergeEgressPortProto(policyCtx, ctx, toEndpoints, auth, &api.PortRule{}, api.PortProtocol{Port: "0", Protocol: api.ProtoAny}, api.ProtoAny, ruleLabels, resMap, fqdns)
+		if err != nil {
+			return found, err
+		}
+	}
+
+	found += cnt
+
+	err = toPorts.Iterate(func(r api.Ports) error {
+		// For L4 Policy, an empty slice of EndpointSelector indicates that the
+		// rule allows all at L3 - explicitly specify this by creating a slice
+		// with the WildcardEndpointSelector.
+		if len(toEndpoints) == 0 {
+			toEndpoints = api.EndpointSelectorSlice{api.WildcardEndpointSelector}
+		}
+		if !policyCtx.IsDeny() {
+			ctx.PolicyTrace("      Allows port %v\n", r.GetPortProtocols())
+		} else {
+			ctx.PolicyTrace("      Denies port %v\n", r.GetPortProtocols())
+		}
+
+		// Trace any L7 (HTTP/Kafka/generic) sub-rules attached to this port.
+		pr := r.GetPortRule()
+		if pr != nil {
+			if !pr.Rules.IsEmpty() {
+				for _, l7 := range pr.Rules.HTTP {
+					ctx.PolicyTrace("        %+v\n", l7)
+				}
+				for _, l7 := range pr.Rules.Kafka {
+					ctx.PolicyTrace("        %+v\n", l7)
+				}
+				for _, l7 := range pr.Rules.L7 {
+					ctx.PolicyTrace("        %+v\n", l7)
+				}
+			}
+		}
+
+		for _, p := range r.GetPortProtocols() {
+			if p.Protocol != api.ProtoAny {
+				cnt, err := mergeEgressPortProto(policyCtx, ctx, toEndpoints, auth, r, p, p.Protocol, ruleLabels, resMap, fqdns)
+				if err != nil {
+					return err
+				}
+				found += cnt
+			} else {
+				// ProtoAny expands into one filter per concrete L4 protocol.
+				cnt, err := mergeEgressPortProto(policyCtx, ctx, toEndpoints, auth, r, p, api.ProtoTCP, ruleLabels, resMap, fqdns)
+				if err != nil {
+					return err
+				}
+				found += cnt
+
+				cnt, err = mergeEgressPortProto(policyCtx, ctx, toEndpoints, auth, r, p, api.ProtoUDP, ruleLabels, resMap, fqdns)
+				if err != nil {
+					return err
+				}
+				found += cnt
+
+				cnt, err = mergeEgressPortProto(policyCtx, ctx, toEndpoints, auth, r, p, api.ProtoSCTP, ruleLabels, resMap, fqdns)
+				if err != nil {
+					return err
+				}
+				found += cnt
+			}
+		}
+		return nil
+	},
+	)
+	if err != nil {
+		return found, err
+	}
+
+	// Merge ICMP-type rules the same way as port rules.
+	err = icmp.Iterate(func(r api.Ports) error {
+		if len(toEndpoints) == 0 {
+			toEndpoints = api.EndpointSelectorSlice{api.WildcardEndpointSelector}
+		}
+		if !policyCtx.IsDeny() {
+			ctx.PolicyTrace("      Allows ICMP type %v\n", r.GetPortProtocols())
+		} else {
+			ctx.PolicyTrace("      Denies ICMP type %v\n", r.GetPortProtocols())
+		}
+
+		for _, p := range r.GetPortProtocols() {
+			cnt, err := mergeEgressPortProto(policyCtx, ctx, toEndpoints, auth, r, p, p.Protocol, ruleLabels, resMap, fqdns)
+			if err != nil {
+				return err
+			}
+			found += cnt
+		}
+		return nil
+	})
+
+	return found, err
+}
+
+// mergeEgressPortProto merges all rules which share the same port & protocol that
+// select a given set of endpoints. It updates the L4Filter mapped to by the specified
+// port and protocol with the contents of the provided PortRule. If the rule
+// being merged has conflicting L7 rules with those already in the provided
+// L4PolicyMap for the specified port-protocol tuple, it returns an error.
+// On success it returns 1 (one filter merged).
+func mergeEgressPortProto(policyCtx PolicyContext, ctx *SearchContext, endpoints api.EndpointSelectorSlice, auth *api.Authentication, r api.Ports, p api.PortProtocol,
+	proto api.L4Proto, ruleLabels labels.LabelArray, resMap L4PolicyMap, fqdns api.FQDNSelectorSlice) (int, error) {
+	// Create a new L4Filter
+	filterToMerge, err := createL4EgressFilter(policyCtx, endpoints, auth, r, p, proto, ruleLabels, fqdns)
+	if err != nil {
+		return 0, err
+	}
+
+	// Merge it into the result map; conflicts surface as an error here.
+	err = addL4Filter(policyCtx, ctx, resMap, p, proto, filterToMerge, ruleLabels)
+	if err != nil {
+		return 0, err
+	}
+	return 1, err
+}
+
+// resolveEgressPolicy resolves this rule's egress allow and deny sections
+// against the given search context, merging the resulting L4 filters into
+// result. It returns result if any filter was merged, or nil if the rule
+// did not select ctx.From or contributed nothing.
+func (r *rule) resolveEgressPolicy(
+	policyCtx PolicyContext,
+	ctx *SearchContext,
+	state *traceState,
+	result L4PolicyMap,
+	requirements, requirementsDeny []slim_metav1.LabelSelectorRequirement,
+) (
+	L4PolicyMap, error,
+) {
+	// Skip selector matching when the caller already filtered the rule set.
+	if !ctx.rulesSelect {
+		if !r.getSelector().Matches(ctx.From) {
+			state.unSelectRule(ctx, ctx.From, r)
+			return nil, nil
+		}
+	}
+
+	state.selectRule(ctx, r)
+	found, foundDeny := 0, 0
+
+	if len(r.Egress) == 0 && len(r.EgressDeny) == 0 {
+		ctx.PolicyTrace("    No egress rules\n")
+	}
+	// Merge all egress allow rules.
+	for _, egressRule := range r.Egress {
+		toEndpoints := egressRule.GetDestinationEndpointSelectorsWithRequirements(requirements)
+		cnt, err := mergeEgress(policyCtx, ctx, toEndpoints, egressRule.Authentication, egressRule.ToPorts, egressRule.ICMPs, r.Rule.Labels.DeepCopy(), result, egressRule.ToFQDNs)
+		if err != nil {
+			return nil, err
+		}
+		if cnt > 0 {
+			found += cnt
+		}
+	}
+
+	// Switch the context into deny mode for the deny section, restoring the
+	// previous mode on return.
+	oldDeny := policyCtx.SetDeny(true)
+	defer func() {
+		policyCtx.SetDeny(oldDeny)
+	}()
+	for _, egressRule := range r.EgressDeny {
+		toEndpoints := egressRule.GetDestinationEndpointSelectorsWithRequirements(requirementsDeny)
+		cnt, err := mergeEgress(policyCtx, ctx, toEndpoints, nil, egressRule.ToPorts, egressRule.ICMPs, r.Rule.Labels.DeepCopy(), result, nil)
+		if err != nil {
+			return nil, err
+		}
+		if cnt > 0 {
+			foundDeny += cnt
+		}
+	}
+
+	if found+foundDeny > 0 {
+		if found != 0 {
+			state.matchedRules++
+		}
+		if foundDeny != 0 {
+			state.matchedDenyRules++
+		}
+		return result, nil
+	}
+
+	return nil, nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/rules.go b/vendor/github.com/cilium/cilium/pkg/policy/rules.go
new file mode 100644
index 000000000..f5b0a2259
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/rules.go
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package policy
+
+import (
+ "fmt"
+
+ slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
+ policyapi "github.com/cilium/cilium/pkg/policy/api"
+)
+
+// ruleSlice is a wrapper around a slice of *rule, which allows for functions
+// to be written with []*rule as a receiver.
+type ruleSlice []*rule
+
+// resolveL4IngressPolicy resolves the ingress policy of all rules in the
+// slice against ctx.To, returning the merged L4PolicyMap.
+func (rules ruleSlice) resolveL4IngressPolicy(policyCtx PolicyContext, ctx *SearchContext) (L4PolicyMap, error) {
+	result := L4PolicyMap{}
+
+	ctx.PolicyTrace("\n")
+	ctx.PolicyTrace("Resolving ingress policy for %+v\n", ctx.To)
+
+	state := traceState{}
+	var matchedRules ruleSlice
+	var requirements, requirementsDeny []slim_metav1.LabelSelectorRequirement
+
+	// Iterate over all FromRequires which select ctx.To. These requirements
+	// will be appended to each EndpointSelector's MatchExpressions in
+	// each FromEndpoints for all ingress rules. This ensures that FromRequires
+	// is taken into account when evaluating policy at L4.
+	for _, r := range rules {
+		if ctx.rulesSelect || r.getSelector().Matches(ctx.To) {
+			matchedRules = append(matchedRules, r)
+			for _, ingressRule := range r.Ingress {
+				for _, requirement := range ingressRule.FromRequires {
+					requirements = append(requirements, requirement.ConvertToLabelSelectorRequirementSlice()...)
+				}
+			}
+			for _, ingressRule := range r.IngressDeny {
+				for _, requirement := range ingressRule.FromRequires {
+					requirementsDeny = append(requirementsDeny, requirement.ConvertToLabelSelectorRequirementSlice()...)
+				}
+			}
+		}
+	}
+
+	// Only dealing with matching rules from now on. Mark it in the ctx
+	oldRulesSelect := ctx.rulesSelect
+	ctx.rulesSelect = true
+
+	for _, r := range matchedRules {
+		_, err := r.resolveIngressPolicy(policyCtx, ctx, &state, result, requirements, requirementsDeny)
+		if err != nil {
+			return nil, err
+		}
+		state.ruleID++
+	}
+
+	state.trace(len(rules), ctx)
+
+	// Restore ctx in case caller uses it again.
+	ctx.rulesSelect = oldRulesSelect
+
+	return result, nil
+}
+
+// resolveL4EgressPolicy resolves the egress policy of all rules in the
+// slice against ctx.From, returning the merged L4PolicyMap.
+func (rules ruleSlice) resolveL4EgressPolicy(policyCtx PolicyContext, ctx *SearchContext) (L4PolicyMap, error) {
+	result := L4PolicyMap{}
+
+	ctx.PolicyTrace("\n")
+	ctx.PolicyTrace("Resolving egress policy for %+v\n", ctx.From)
+
+	state := traceState{}
+	var matchedRules ruleSlice
+	var requirements, requirementsDeny []slim_metav1.LabelSelectorRequirement
+
+	// Iterate over all ToRequires which select ctx.To. These requirements will
+	// be appended to each EndpointSelector's MatchExpressions in each
+	// ToEndpoints for all egress rules. This ensures that ToRequires is
+	// taken into account when evaluating policy at L4.
+	for _, r := range rules {
+		if ctx.rulesSelect || r.getSelector().Matches(ctx.From) {
+			matchedRules = append(matchedRules, r)
+			for _, egressRule := range r.Egress {
+				for _, requirement := range egressRule.ToRequires {
+					requirements = append(requirements, requirement.ConvertToLabelSelectorRequirementSlice()...)
+				}
+			}
+			for _, egressRule := range r.EgressDeny {
+				for _, requirement := range egressRule.ToRequires {
+					requirementsDeny = append(requirementsDeny, requirement.ConvertToLabelSelectorRequirementSlice()...)
+				}
+			}
+		}
+	}
+
+	// Only dealing with matching rules from now on. Mark it in the ctx
+	oldRulesSelect := ctx.rulesSelect
+	ctx.rulesSelect = true
+
+	for i, r := range matchedRules {
+		state.ruleID = i
+		_, err := r.resolveEgressPolicy(policyCtx, ctx, &state, result, requirements, requirementsDeny)
+		if err != nil {
+			return nil, err
+		}
+		state.ruleID++
+	}
+
+	state.trace(len(rules), ctx)
+
+	// Restore ctx in case caller uses it again.
+	ctx.rulesSelect = oldRulesSelect
+
+	return result, nil
+}
+
+// updateEndpointsCaches iterates over a given list of rules to update the cache
+// within each rule which determines whether or not the given endpoint's
+// identity is selected by that rule.
+// Returns whether the endpoint was selected by at least one of the rules, or
+// an error if the endpoint is nil or its identity cannot be determined.
+func (rules ruleSlice) updateEndpointsCaches(ep Endpoint) (bool, error) {
+	if ep == nil {
+		return false, fmt.Errorf("cannot update caches in rules because endpoint is nil")
+	}
+	id := ep.GetID16()
+	securityIdentity, err := ep.GetSecurityIdentity()
+	if err != nil {
+		return false, fmt.Errorf("cannot update caches in rules for endpoint %d because it is being deleted: %s", id, err)
+	}
+
+	if securityIdentity == nil {
+		return false, fmt.Errorf("cannot update caches in rules for endpoint %d because it has a nil identity", id)
+	}
+	endpointSelected := false
+	for _, r := range rules {
+		// NodeSelector can only match nodes, EndpointSelector only pods.
+		if (r.NodeSelector.LabelSelector != nil) != ep.IsHost() {
+			continue
+		}
+		// Update the matches cache of each rule, and note if
+		// the ep is selected by any of them.
+		if ruleMatches := r.matches(securityIdentity); ruleMatches {
+			endpointSelected = true
+		}
+	}
+
+	return endpointSelected, nil
+}
+
+// AsPolicyRules returns the internal policyapi.Rule objects as a
+// policyapi.Rules object. The returned slice shares the underlying
+// Rule structs with the receiver.
+func (rules ruleSlice) AsPolicyRules() policyapi.Rules {
+	policyRules := make(policyapi.Rules, 0, len(rules))
+	for _, r := range rules {
+		policyRules = append(policyRules, &r.Rule)
+	}
+	return policyRules
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/selectorcache.go b/vendor/github.com/cilium/cilium/pkg/policy/selectorcache.go
new file mode 100644
index 000000000..f90e43f28
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/selectorcache.go
@@ -0,0 +1,1156 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package policy
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "net"
+ "sort"
+ "strings"
+ "sync"
+ "sync/atomic"
+
+ "github.com/sirupsen/logrus"
+
+ "github.com/cilium/cilium/api/v1/models"
+ "github.com/cilium/cilium/pkg/identity"
+ "github.com/cilium/cilium/pkg/identity/cache"
+ k8sConst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io"
+ "github.com/cilium/cilium/pkg/labels"
+ "github.com/cilium/cilium/pkg/lock"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+ "github.com/cilium/cilium/pkg/option"
+ "github.com/cilium/cilium/pkg/policy/api"
+)
+
+// CachedSelector represents an identity selector owned by the selector cache
+type CachedSelector interface {
+	// GetSelections returns the cached set of numeric identities
+	// selected by the CachedSelector. The returned slice must NOT
+	// be modified, as it is shared among multiple users.
+	GetSelections() identity.NumericIdentitySlice
+
+	// GetMetadataLabels returns metadata labels for additional context
+	// surrounding the selector. These are typically the labels associated with
+	// Cilium rules.
+	GetMetadataLabels() labels.LabelArray
+
+	// Selects returns 'true' if the CachedSelector selects the given
+	// numeric identity.
+	Selects(nid identity.NumericIdentity) bool
+
+	// IsWildcard returns true if the endpoint selector selects
+	// all endpoints.
+	IsWildcard() bool
+
+	// IsNone returns true if the selector never selects anything
+	IsNone() bool
+
+	// String returns the string representation of this selector.
+	// Used as a map key.
+	String() string
+}
+
+// CachedSelectorSlice is a slice of CachedSelectors that can be sorted.
+type CachedSelectorSlice []CachedSelector
+
+// MarshalJSON returns the CachedSelectors as a JSON array of their string
+// representations.
+func (s CachedSelectorSlice) MarshalJSON() ([]byte, error) {
+	buffer := bytes.NewBufferString("[")
+	for i, selector := range s {
+		buf, err := json.Marshal(selector.String())
+		if err != nil {
+			return nil, err
+		}
+
+		buffer.Write(buf)
+		if i < len(s)-1 {
+			buffer.WriteString(",")
+		}
+	}
+	buffer.WriteString("]")
+	return buffer.Bytes(), nil
+}
+
+// Len and Swap implement sort.Interface together with Less below.
+func (s CachedSelectorSlice) Len() int      { return len(s) }
+func (s CachedSelectorSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// Less orders selectors lexicographically by their string (map key) form.
+func (s CachedSelectorSlice) Less(i, j int) bool {
+	return strings.Compare(s[i].String(), s[j].String()) < 0
+}
+
+// SelectsAllEndpoints returns whether the CachedSelectorSlice selects all
+// endpoints, which is true if the wildcard endpoint selector is present in the
+// slice.
+func (s CachedSelectorSlice) SelectsAllEndpoints() bool {
+	for _, selector := range s {
+		if selector.IsWildcard() {
+			return true
+		}
+	}
+	return false
+}
+
+// CachedSelectionUser inserts selectors into the cache and gets update
+// callbacks whenever the set of selected numeric identities change for
+// the CachedSelectors pushed by it.
+type CachedSelectionUser interface {
+	// IdentitySelectionUpdated implementations MUST NOT call back
+	// to the name manager or the selector cache while executing this function!
+	//
+	// The caller is responsible for making sure the same identity is not
+	// present in both 'added' and 'deleted'.
+	IdentitySelectionUpdated(selector CachedSelector, added, deleted []identity.NumericIdentity)
+}
+
+// identitySelector is the internal interface for all selectors in the
+// selector cache.
+//
+// identitySelector represents the mapping of an EndpointSelector
+// to a slice of identities. These mappings are updated via two
+// different processes:
+//
+// 1. When policy rules are changed these are added and/or deleted
+// depending on what selectors the rules contain. Cached selections of
+// new identitySelectors are pre-populated from the set of currently
+// known identities.
+//
+// 2. When reachable identities appear or disappear, either via local
+// allocation (CIDRs), or via the KV-store (remote endpoints). In this
+// case all existing identitySelectors are walked through and their
+// cached selections are updated as necessary.
+//
+// In both of the above cases the set of existing identitySelectors is
+// write locked.
+//
+// To minimize the upkeep the identity selectors are shared across
+// all IdentityPolicies, so that only one copy exists for each
+// identitySelector. Users of the SelectorCache take care of creating
+// identitySelectors as needed by identity policies. The set of
+// identitySelectors is read locked during an IdentityPolicy update so
+// that the policy is always updated using a coherent set of
+// cached selections.
+//
+// identitySelector is used as a map key, so it must not be implemented by a
+// map, slice, or a func, or a runtime panic will be triggered. In all
+// cases below identitySelector is being implemented by structs.
+//
+// Because the selector exposed to the user is used as a map key, it must always
+// be passed to the user as a pointer to the actual implementation type.
+// For this reason 'notifyUsers' must be implemented by each type separately.
+type identitySelector interface {
+	CachedSelector
+	addUser(CachedSelectionUser) (added bool)
+
+	// Called with NameManager and SelectorCache locks held
+	removeUser(CachedSelectionUser, identityNotifier) (last bool)
+
+	// fetchIdentityMappings returns all of the identities currently
+	// reference-counted by this selector. It is used during cleanup of the
+	// selector.
+	fetchIdentityMappings() []identity.NumericIdentity
+
+	// This may be called while the NameManager lock is held. wg.Wait()
+	// returns after user notifications have been completed, which may require
+	// taking Endpoint and SelectorCache locks, so these locks must not be
+	// held when calling wg.Wait().
+	notifyUsers(sc *SelectorCache, added, deleted []identity.NumericIdentity, wg *sync.WaitGroup)
+
+	numUsers() int
+}
+
+// scIdentity is the information we need about an identity that rules can select
+type scIdentity struct {
+	NID       identity.NumericIdentity
+	lbls      labels.LabelArray
+	nets      []*net.IPNet // Most specific CIDR for the identity, if any.
+	computed  bool         // nets has been computed
+	namespace string       // value of the namespace label, or ""
+}
+
+// scIdentityCache is a cache of Identities keyed by the numeric identity
+type scIdentityCache map[identity.NumericIdentity]scIdentity
+
+// newIdentity builds a fully-computed scIdentity from a numeric identity and
+// its labels, extracting the namespace label and most specific CIDR up front.
+func newIdentity(nid identity.NumericIdentity, lbls labels.LabelArray) scIdentity {
+	return scIdentity{
+		NID:       nid,
+		lbls:      lbls,
+		nets:      getLocalScopeNets(nid, lbls),
+		namespace: lbls.Get(labels.LabelSourceK8sKeyPrefix + k8sConst.PodNamespaceLabel),
+		computed:  true,
+	}
+}
+
+// getLocalScopeNets returns the most specific CIDR for a local scope identity,
+// derived from the identity's CIDR-source labels. Returns nil for non-local
+// identities or when no CIDR label parses.
+func getLocalScopeNets(id identity.NumericIdentity, lbls labels.LabelArray) []*net.IPNet {
+	if id.HasLocalScope() {
+		var (
+			maskSize         int
+			mostSpecificCidr *net.IPNet
+		)
+		for _, lbl := range lbls {
+			if lbl.Source == labels.LabelSourceCIDR {
+				_, netIP, err := net.ParseCIDR(lbl.Key)
+				if err == nil {
+					// Keep the CIDR with the longest prefix (largest mask).
+					if ms, _ := netIP.Mask.Size(); ms > maskSize {
+						mostSpecificCidr = netIP
+						maskSize = ms
+					}
+				}
+			}
+		}
+		if mostSpecificCidr != nil {
+			return []*net.IPNet{mostSpecificCidr}
+		}
+	}
+	return nil
+}
+
+// getIdentityCache converts an IdentityCache (numeric identity -> labels)
+// into the selector cache's internal scIdentityCache representation.
+func getIdentityCache(ids cache.IdentityCache) scIdentityCache {
+	idCache := make(map[identity.NumericIdentity]scIdentity, len(ids))
+	for nid, lbls := range ids {
+		idCache[nid] = newIdentity(nid, lbls)
+	}
+	return idCache
+}
+
+// userNotification stores the information needed to call
+// IdentitySelectionUpdated callbacks to notify users of selector's
+// identity changes. These are queued to be able to call the callbacks
+// in FIFO order while not holding any locks.
+type userNotification struct {
+	user     CachedSelectionUser
+	selector CachedSelector
+	added    []identity.NumericIdentity
+	deleted  []identity.NumericIdentity
+	wg       *sync.WaitGroup
+}
+
+// SelectorCache caches identities, identity selectors, and the
+// subsets of identities each selector selects.
+type SelectorCache struct {
+	mutex lock.RWMutex
+
+	// idAllocator is used to allocate and release identities. It is used
+	// by the NameManager to manage identities corresponding to FQDNs.
+	idAllocator cache.IdentityAllocator
+
+	// idCache contains all known identities as informed by the
+	// kv-store and the local identity facility via our
+	// UpdateIdentities() function.
+	idCache scIdentityCache
+
+	// map key is the string representation of the selector being cached.
+	selectors map[string]identitySelector
+
+	localIdentityNotifier identityNotifier
+
+	// userCond is a condition variable for receiving signals
+	// about addition of new elements in userNotes
+	userCond *sync.Cond
+	// userMutex protects userNotes and is linked to userCond
+	userMutex lock.Mutex
+	// userNotes holds a FIFO list of user notifications to be made
+	userNotes []userNotification
+
+	// used to lazily start the handler for user notifications.
+	startNotificationsHandlerOnce sync.Once
+}
+
+// GetModel returns the API model of the SelectorCache: one entry per cached
+// selector with its selected identities, user count and metadata labels.
+func (sc *SelectorCache) GetModel() models.SelectorCache {
+	sc.mutex.RLock()
+	defer sc.mutex.RUnlock()
+
+	selCacheMdl := make(models.SelectorCache, 0, len(sc.selectors))
+
+	for selector, idSel := range sc.selectors {
+		selections := idSel.GetSelections()
+		ids := make([]int64, 0, len(selections))
+		for i := range selections {
+			ids = append(ids, int64(selections[i]))
+		}
+		selMdl := &models.SelectorIdentityMapping{
+			Selector:   selector,
+			Identities: ids,
+			Users:      int64(idSel.numUsers()),
+			Labels:     idSel.GetMetadataLabels(),
+		}
+		selCacheMdl = append(selCacheMdl, selMdl)
+	}
+
+	return selCacheMdl
+}
+
+// handleUserNotifications is the long-running goroutine that drains the
+// userNotes FIFO and delivers IdentitySelectionUpdated callbacks without
+// holding any SelectorCache locks.
+func (sc *SelectorCache) handleUserNotifications() {
+	for {
+		sc.userMutex.Lock()
+		for len(sc.userNotes) == 0 {
+			sc.userCond.Wait()
+		}
+		// get the current batch of notifications and release the lock so that SelectorCache
+		// can't block on userMutex while we call IdentitySelectionUpdated callbacks below.
+		notifications := sc.userNotes
+		sc.userNotes = nil
+		sc.userMutex.Unlock()
+
+		for _, n := range notifications {
+			n.user.IdentitySelectionUpdated(n.selector, n.added, n.deleted)
+			n.wg.Done()
+		}
+	}
+}
+
+// queueUserNotification enqueues one callback for the notification goroutine
+// (started lazily on first use) and registers it with wg so callers can wait
+// for delivery.
+func (sc *SelectorCache) queueUserNotification(user CachedSelectionUser, selector CachedSelector, added, deleted []identity.NumericIdentity, wg *sync.WaitGroup) {
+	sc.startNotificationsHandlerOnce.Do(func() {
+		go sc.handleUserNotifications()
+	})
+	wg.Add(1)
+	sc.userMutex.Lock()
+	sc.userNotes = append(sc.userNotes, userNotification{
+		user:     user,
+		selector: selector,
+		added:    added,
+		deleted:  deleted,
+		wg:       wg,
+	})
+	sc.userMutex.Unlock()
+	sc.userCond.Signal()
+}
+
+// NewSelectorCache creates a new SelectorCache with the given identities.
+func NewSelectorCache(allocator cache.IdentityAllocator, ids cache.IdentityCache) *SelectorCache {
+	sc := &SelectorCache{
+		idAllocator: allocator,
+		idCache:     getIdentityCache(ids),
+		selectors:   make(map[string]identitySelector),
+	}
+	sc.userCond = sync.NewCond(&sc.userMutex)
+	return sc
+}
+
+// SetLocalIdentityNotifier injects the provided identityNotifier into the
+// SelectorCache. Currently, this is used to inject the FQDN subsystem into
+// the SelectorCache so the SelectorCache can notify the FQDN subsystem when
+// it should be aware of a given FQDNSelector for which CIDR identities need
+// to be provided upon DNS lookups which correspond to said FQDNSelector.
+func (sc *SelectorCache) SetLocalIdentityNotifier(pop identityNotifier) {
+	sc.localIdentityNotifier = pop
+}
+
+var (
+	// Empty slice of numeric identities used for all selectors that select nothing
+	emptySelection identity.NumericIdentitySlice
+	// wildcardSelectorKey is used to compare if a key is for a wildcard
+	wildcardSelectorKey = api.WildcardEndpointSelector.LabelSelector.String()
+	// noneSelectorKey is used to compare if a key is for "reserved:none"
+	noneSelectorKey = api.EndpointSelectorNone.LabelSelector.String()
+)
+
+// selectorManager is the common implementation backing cached selectors: it
+// holds the selector's map key, its users, and the cached identity selections.
+type selectorManager struct {
+	key              string
+	selections       atomic.Pointer[identity.NumericIdentitySlice]
+	users            map[CachedSelectionUser]struct{}
+	cachedSelections map[identity.NumericIdentity]struct{}
+	metadataLbls     labels.LabelArray
+}
+
+// Equal is used by checker.Equals, and only considers the identity of the selector,
+// ignoring the internal state!
+func (s *selectorManager) Equal(b *selectorManager) bool {
+	return s.key == b.key
+}
+
+//
+// CachedSelector implementation (== Public API)
+//
+// No locking needed.
+//
+
+// GetSelections returns the set of numeric identities currently
+// selected. The cached selections can be concurrently updated. In
+// that case GetSelections() will return either the old or new version
+// of the selections. If the old version is returned, the user is
+// guaranteed to receive a notification including the update.
+func (s *selectorManager) GetSelections() identity.NumericIdentitySlice {
+	selections := s.selections.Load()
+	if selections == nil {
+		return emptySelection
+	}
+	return *selections
+}
+
+// GetMetadataLabels returns the metadata labels associated with this selector.
+func (s *selectorManager) GetMetadataLabels() labels.LabelArray {
+	return s.metadataLbls
+}
+
+// Selects return 'true' if the CachedSelector selects the given
+// numeric identity.
+func (s *selectorManager) Selects(nid identity.NumericIdentity) bool {
+	if s.IsWildcard() {
+		return true
+	}
+	// Binary search; GetSelections() returns a sorted slice (see updateSelections).
+	nids := s.GetSelections()
+	idx := sort.Search(len(nids), func(i int) bool { return nids[i] >= nid })
+	return idx < len(nids) && nids[idx] == nid
+}
+
+// IsWildcard returns true if the endpoint selector selects all
+// endpoints.
+func (s *selectorManager) IsWildcard() bool {
+	return s.key == wildcardSelectorKey
+}
+
+// IsNone returns true if the endpoint selector never selects anything.
+func (s *selectorManager) IsNone() bool {
+	return s.key == noneSelectorKey
+}
+
+// String returns the map key for this selector
+func (s *selectorManager) String() string {
+	return s.key
+}
+
+//
+// identitySelector implementation (== internal API)
+//
+
+// addUser registers a user with this selector; returns false if the user was
+// already registered.
+// lock must be held
+func (s *selectorManager) addUser(user CachedSelectionUser) (added bool) {
+	if _, exists := s.users[user]; exists {
+		return false
+	}
+	s.users[user] = struct{}{}
+	return true
+}
+
+// removeUser deregisters a user; returns true when no users remain.
+// lock must be held
+func (s *selectorManager) removeUser(user CachedSelectionUser, dnsProxy identityNotifier) (last bool) {
+	delete(s.users, user)
+	return len(s.users) == 0
+}
+
+// removeUser additionally unregisters the FQDN selector from the dnsProxy
+// when the last user is removed.
+// locks must be held for the dnsProxy and the SelectorCache
+func (f *fqdnSelector) removeUser(user CachedSelectionUser, dnsProxy identityNotifier) (last bool) {
+	delete(f.users, user)
+	if len(f.users) == 0 {
+		dnsProxy.UnregisterForIdentityUpdatesLocked(f.selector)
+		return true
+	}
+	return false
+}
+
+// numUsers returns the number of registered users.
+// lock must be held
+func (s *selectorManager) numUsers() int {
+	return len(s.users)
+}
+
+// updateSelections updates the immutable slice representation of the
+// cached selections after the cached selections have been changed.
+//
+// lock must be held
+func (s *selectorManager) updateSelections() {
+	selections := make(identity.NumericIdentitySlice, len(s.cachedSelections))
+	i := 0
+	for nid := range s.cachedSelections {
+		selections[i] = nid
+		i++
+	}
+	// Sort the numeric identities so that the map iteration order
+	// does not matter. This makes testing easier, but may help
+	// identifying changes easier also otherwise.
+	sort.Slice(selections, func(i, j int) bool {
+		return selections[i] < selections[j]
+	})
+	s.setSelections(&selections)
+}
+
+// setSelections atomically publishes the new selections slice, substituting
+// the shared emptySelection for empty inputs.
+func (s *selectorManager) setSelections(selections *identity.NumericIdentitySlice) {
+	if len(*selections) > 0 {
+		s.selections.Store(selections)
+	} else {
+		s.selections.Store(&emptySelection)
+	}
+}
+
+// fqdnSelector is a selectorManager specialized for FQDN selectors; it keeps
+// the original api.FQDNSelector so it can be (un)registered with the
+// identityNotifier.
+type fqdnSelector struct {
+	selectorManager
+	selector api.FQDNSelector
+}
+
+// notifyUsers queues an IdentitySelectionUpdated notification for every user
+// of this selector.
+//
+// lock must be held
+//
+// The caller is responsible for making sure the same identity is not
+// present in both 'added' and 'deleted'.
+func (f *fqdnSelector) notifyUsers(sc *SelectorCache, added, deleted []identity.NumericIdentity, wg *sync.WaitGroup) {
+	for user := range f.users {
+		// pass 'f' to the user as '*fqdnSelector'
+		sc.queueUserNotification(user, f, added, deleted, wg)
+	}
+}
+
+// allocateIdentityMappings is a wrapper for the underlying identity allocator
+// which takes a slice of IPs that should be allocated with a specified
+// selector, and allocates identities for each of them. This may cause
+// allocation of new identities, or take reference counts on existing local
+// identities. Therefore, the caller must take care to ensure that these
+// identities are eventually released via a call to releaseIdentityMappings().
+//
+// The typical usage to properly track identity references is roughly:
+//
+// identities := SelectorCache.allocateIdentityMappings(...)
+// SelectorCache.mutex.Lock()
+// duplicateIdentities := fqdnSelector.transferIdentityReferencesToSelector(...)
+// SelectorCache.mutex.Unlock()
+// SelectorCache.releaseIdentityMappings(duplicateIdentities)
+// ... (active usage of the selector)
+// SelectorCache.mutex.Lock()
+// remainingIdentities := SelectorCache.removeSelectorLocked(...)
+// SelectorCache.mutex.Unlock()
+// SelectorCache.releaseIdentityMappings(remainingIdentities)
+//
+// Returns nil if allocation fails (the failure is logged, not propagated).
+//
+// sc.mutex MUST NOT be held while calling this function.
+func (sc *SelectorCache) allocateIdentityMappings(sel api.FQDNSelector, selectorIPMapping map[api.FQDNSelector][]net.IP) []*identity.Identity {
+	// We don't know whether the IPs are associated with the cached copy
+	// of this selector until we map those IPs to identities and look
+	// up the cached copy of the selector. This requires potentially
+	// allocating a CIDR identity for those IPs, and grabbing the
+	// SelectorCache mutex (which cannot be held during allocations due
+	// to pkg/identity/cache/cache.identityWatcher).
+	//
+	// Therefore, here we unconditionally allocate identities for all IPs
+	// in 'selectorIPMapping', then find out if any are duplicated with the
+	// existing selector content later on.
+	var (
+		currentlyAllocatedIdentities []*identity.Identity
+		selectorIPs                  []net.IP
+		err                          error
+	)
+
+	selectorIPs = selectorIPMapping[sel]
+	log.WithFields(logrus.Fields{
+		"fqdnSelector": sel,
+		"ips":          selectorIPs,
+	}).Debug("getting identities for IPs associated with FQDNSelector")
+
+	// TODO: Consider if upserts to ipcache should be delayed until endpoint policies have been
+	// updated. This is the path from policy updates rather than for DNS proxy results. Hence
+	// any existing IPs would typically already have been pushed to the ipcache as they would
+	// not be newly allocated. We need the 'allocation' here to get a reference count on the
+	// allocations.
+	currentlyAllocatedIdentities, err = sc.idAllocator.AllocateCIDRsForIPs(selectorIPs, nil)
+	if err != nil {
+		log.WithError(err).WithField("prefixes", selectorIPs).Warn(
+			"failed to allocate identities for IPs")
+		return nil
+	}
+
+	return currentlyAllocatedIdentities
+}
+
+// transferIdentityReferencesToSelector walks through the specified slice of
+// identities, and associates them with the received selector. If any of the
+// identities passed into this function are already associated with the
+// selector, then these identities are returned to the caller.
+//
+// The goal of this function is to ensure that at any given point in time,
+// the selector holds a maximum of one reference to any given identity.
+// If the calling code opportunistically allocates references to identities
+// twice for a given selector, this function will detect this case and collect
+// the set of identities that are referenced twice.
+//
+// The caller MUST release references to each identity in the returned slice
+// after releasing SelectorCache.mutex.
+func (f *fqdnSelector) transferIdentityReferencesToSelector(currentlyAllocatedIdentities []*identity.Identity) []identity.NumericIdentity {
+	identitiesToRelease := make([]identity.NumericIdentity, 0, len(currentlyAllocatedIdentities))
+	for _, id := range currentlyAllocatedIdentities {
+		// Already present: the extra reference must be released by the caller.
+		if _, exists := f.cachedSelections[id.ID]; exists {
+			identitiesToRelease = append(identitiesToRelease, id.ID)
+		}
+		f.cachedSelections[id.ID] = struct{}{}
+	}
+
+	return identitiesToRelease
+}
+
+// fetchIdentityMappings returns the set of identities that this selector
+// holds references for. This should be used during cleanup of the selector
+// to ensure that all remaining references to local identities are released,
+// in order to prevent leaking of identities.
+func (f *fqdnSelector) fetchIdentityMappings() []identity.NumericIdentity {
+	ids := make([]identity.NumericIdentity, 0, len(f.cachedSelections))
+	for id := range f.cachedSelections {
+		ids = append(ids, id)
+	}
+
+	return ids
+}
+
+// releaseIdentityMappings must be called exactly once for each selector that
+// is removed from the selectorcache, in order to release local identity
+// references held in the selector's cachedSelections.
+//
+// See SelectorCache.allocateIdentityMappings() for a lifecycle description.
+//
+// sc.mutex MUST NOT be held while calling this function.
+func (sc *SelectorCache) releaseIdentityMappings(identitiesToRelease []identity.NumericIdentity) {
+	// TODO: Remove timeouts for CIDR identity allocation (as it is local).
+	ctx, cancel := context.WithTimeout(context.TODO(), option.Config.KVstoreConnectivityTimeout)
+	defer cancel()
+	sc.idAllocator.ReleaseCIDRIdentitiesByID(ctx, identitiesToRelease)
+}
+
+// identityNotifier provides a means for other subsystems to be made aware of a
+// given FQDNSelector (currently pkg/fqdn) so that said subsystems can notify
+// the SelectorCache about new IPs (via CIDR Identities) which correspond to
+// said FQDNSelector. This is necessary since there is nothing intrinsic to a
+// CIDR Identity that says that it corresponds to a given FQDNSelector; this
+// relationship is contained only via DNS responses, which are handled
+// externally.
+type identityNotifier interface {
+ // Lock must be held during any calls to *Locked functions below.
+ Lock()
+
+ // Unlock must be called after calls to *Locked functions below.
+ Unlock()
+
+ // RegisterForIdentityUpdatesLocked exposes this FQDNSelector so that identities
+ // for IPs contained in a DNS response that matches said selector can
+ // be propagated back to the SelectorCache via `UpdateFQDNSelector`.
+ //
+ // This function should only be called when the SelectorCache has been
+ // made aware of the FQDNSelector for the first time; subsequent
+ // updates to the selectors should be made via `UpdateFQDNSelector`.
+ RegisterForIdentityUpdatesLocked(selector api.FQDNSelector)
+
+ // UnregisterForIdentityUpdatesLocked removes this FQDNSelector from the set of
+ // FQDNSelectors which are being tracked by the identityNotifier. The result
+ // of this is that no more updates for IPs which correspond to said selector
+ // are propagated back to the SelectorCache via `UpdateFQDNSelector`.
+ // This occurs when there are no more users of a given FQDNSelector for the
+ // SelectorCache.
+ UnregisterForIdentityUpdatesLocked(selector api.FQDNSelector)
+
+ // MapSelectorsToIPsLocked returns a slice of IPs that may be
+ // associated with the specified FQDN selector, based on the
+ // currently-known DNS mappings for the IPs held inside the
+ // identityNotifier.
+ MapSelectorsToIPsLocked(map[api.FQDNSelector]struct{}) (selectorsMissingIPs []api.FQDNSelector, selectorIPMapping map[api.FQDNSelector][]net.IP)
+}
+
+type labelIdentitySelector struct {
+ selectorManager
+ selector api.EndpointSelector
+ namespaces []string // allowed namespaces, or ""
+}
+
+// lock must be held
+//
+// The caller is responsible for making sure the same identity is not
+// present in both 'added' and 'deleted'.
+func (l *labelIdentitySelector) notifyUsers(sc *SelectorCache, added, deleted []identity.NumericIdentity, wg *sync.WaitGroup) {
+ for user := range l.users {
+ // pass 'l' to the user as '*labelIdentitySelector'
+ sc.queueUserNotification(user, l, added, deleted, wg)
+ }
+}
+
+// xxxMatches returns true if the CachedSelector matches given labels.
+// This is slow, but only used for policy tracing, so it's OK.
+func (l *labelIdentitySelector) xxxMatches(labels labels.LabelArray) bool {
+ return l.selector.Matches(labels)
+}
+
+func (l *labelIdentitySelector) matchesNamespace(ns string) bool {
+ if len(l.namespaces) > 0 {
+ if ns != "" {
+ for i := range l.namespaces {
+ if ns == l.namespaces[i] {
+ return true
+ }
+ }
+ }
+ // namespace required, but no match
+ return false
+ }
+ // no namespace required, match
+ return true
+}
+
+func (l *labelIdentitySelector) matches(identity scIdentity) bool {
+ return l.matchesNamespace(identity.namespace) && l.selector.Matches(identity.lbls)
+}
+
+func (l *labelIdentitySelector) fetchIdentityMappings() []identity.NumericIdentity {
+ // labelIdentitySelectors don't retain identity references, so no-op.
+ return nil
+}
+
+//
+// CachedSelector implementation (== Public API)
+//
+// No locking needed.
+//
+
+// UpdateFQDNSelector updates the mapping of fqdnKey (the FQDNSelector from a
+// policy rule as a string) to the provided list of identities. If the contents
+// of the cachedSelections differ from those in the identities slice, all users
+// are notified asynchronously. Caller should Wait() on the returned
+// sync.WaitGroup before triggering any policy updates. Policy updates may need
+// Endpoint locks, so this Wait() can deadlock if the caller is holding any
+// endpoint locks.
+func (sc *SelectorCache) UpdateFQDNSelector(fqdnSelec api.FQDNSelector, identities []identity.NumericIdentity, wg *sync.WaitGroup) {
+ sc.mutex.Lock()
+ identitiesToRelease := sc.updateFQDNSelector(fqdnSelec, identities, wg)
+ sc.mutex.Unlock()
+ sc.releaseIdentityMappings(identitiesToRelease)
+}
+
+func (sc *SelectorCache) updateFQDNSelector(fqdnSelec api.FQDNSelector, identities []identity.NumericIdentity, wg *sync.WaitGroup) (identitiesToRelease []identity.NumericIdentity) {
+ fqdnKey := fqdnSelec.String()
+
+ var fqdnSel *fqdnSelector
+
+ selector, exists := sc.selectors[fqdnKey]
+ if !exists || selector == nil {
+ fqdnSel = &fqdnSelector{
+ selectorManager: selectorManager{
+ key: fqdnKey,
+ users: make(map[CachedSelectionUser]struct{}),
+ cachedSelections: make(map[identity.NumericIdentity]struct{}),
+ },
+ selector: fqdnSelec,
+ }
+ sc.selectors[fqdnKey] = fqdnSel
+ } else {
+ fqdnSel = selector.(*fqdnSelector)
+ }
+
+ // All identities handed into this function must have their references
+ // released at some point. This may occur because the incoming
+ // 'identities' slice is signalling that all identities should be
+ // deleted from the selector or because there are duplicates between
+ // 'identities' and the existing cached selections.
+ //
+ // Accumulate these and return them to the caller for deallocation
+ // outside the sc.mutex critical section.
+ maxToRelease := len(identities) + len(fqdnSel.cachedSelections)
+ identitiesToRelease = make([]identity.NumericIdentity, 0, maxToRelease)
+
+ // Convert identity slice to map for comparison with cachedSelections map.
+ idsAsMap := make(map[identity.NumericIdentity]struct{}, len(identities))
+ for _, v := range identities {
+ if _, exists := idsAsMap[v]; exists {
+ identitiesToRelease = append(identitiesToRelease, v)
+ } else {
+ idsAsMap[v] = struct{}{}
+ }
+ }
+
+ // Note that 'added' and 'deleted' are guaranteed to be
+ // disjoint, as one of them is left as nil, or an identity
+ // being in 'identities' is a precondition for an
+ // identity to be appended to 'added', while the inverse is
+ // true for 'deleted'.
+ var added, deleted []identity.NumericIdentity
+
+ /* TODO - the FQDN side should expose what was changed (IPs added, and removed)
+ * not all IPs corresponding to an FQDN - this will make this diff much
+ * cheaper, but will require more plumbing on the FQDN side. for now, this
+ * is good enough.
+ *
+ * Case 1: identities did correspond to this FQDN, but no longer do. Reset
+ * the map
+ */
+ if len(identities) == 0 && len(fqdnSel.cachedSelections) != 0 {
+ // Need to update deleted to be all in cached selections
+ for k := range fqdnSel.cachedSelections {
+ deleted = append(deleted, k)
+ identitiesToRelease = append(identitiesToRelease, k)
+ }
+ fqdnSel.cachedSelections = make(map[identity.NumericIdentity]struct{})
+ } else if len(identities) != 0 && len(fqdnSel.cachedSelections) == 0 {
+ // Case 2: identities now correspond to this FQDN, but didn't before.
+ // We don't have to do any comparison of the maps to see what changed
+ // and what didn't.
+ added = identities
+ fqdnSel.cachedSelections = idsAsMap
+ } else {
+ // Case 3: Something changed resulting in some identities being added
+ // and / or removed. Figure out what these sets are (new identities
+ // added, or identities deleted).
+ for k := range fqdnSel.cachedSelections {
+ // If identity in cached selectors isn't in identities which were
+ // passed in, mark it as being deleted, and remove it from
+ // cachedSelectors.
+ if _, ok := idsAsMap[k]; !ok {
+ deleted = append(deleted, k)
+ delete(fqdnSel.cachedSelections, k)
+ }
+
+ // This function is passed a complete set of the new
+ // identities to associate with this selector, and each
+ // identity already has a reference count. Therefore,
+ // in order to balance references to the same
+ // identities, we should always remove references to
+ // identities that were previously selected by this
+ // selector.
+ identitiesToRelease = append(identitiesToRelease, k)
+ }
+
+ // Now iterate over the provided identities to update the
+ // cachedSelections accordingly, and so we can see which identities
+ // were actually added (removing those which were added already).
+ for _, allowedIdentity := range identities {
+ if _, ok := fqdnSel.cachedSelections[allowedIdentity]; !ok {
+ // This identity was actually added and not already in the map.
+ added = append(added, allowedIdentity)
+ fqdnSel.cachedSelections[allowedIdentity] = struct{}{}
+ }
+ }
+ }
+
+ // Note: we don't need to go through the identity cache to see what
+ // identities "match" this selector. This has to be updated via whatever is
+ // getting the CIDR identities which correspond to this FQDNSelector. This
+ // is the primary difference here between FQDNSelector and IdentitySelector.
+ fqdnSel.updateSelections()
+ fqdnSel.notifyUsers(sc, added, deleted, wg) // disjoint sets, see the comment above
+
+ return identitiesToRelease
+}
+
+// AddFQDNSelector adds the given api.FQDNSelector in to the selector cache. If
+// an identical EndpointSelector has already been cached, the corresponding
+// CachedSelector is returned, otherwise one is created and added to the cache.
+func (sc *SelectorCache) AddFQDNSelector(user CachedSelectionUser, lbls labels.LabelArray, fqdnSelec api.FQDNSelector) (cachedSelector CachedSelector, added bool) {
+ key := fqdnSelec.String()
+
+ // Lock NameManager before the SelectorCache
+ sc.localIdentityNotifier.Lock()
+ defer sc.localIdentityNotifier.Unlock()
+
+ // If the selector already exists, use it.
+ sc.mutex.Lock()
+ fqdnSel, exists := sc.selectors[key]
+ if exists {
+ added := fqdnSel.addUser(user)
+ sc.mutex.Unlock()
+ return fqdnSel, added
+ }
+ sc.mutex.Unlock()
+
+ // Create the new selector. Pulling the identities it selects could
+ // cause allocation of new CIDR identities, so we do this while not
+ // holding the 'sc.mutex'.
+ newFQDNSel := &fqdnSelector{
+ selectorManager: selectorManager{
+ key: key,
+ users: make(map[CachedSelectionUser]struct{}),
+ cachedSelections: make(map[identity.NumericIdentity]struct{}),
+ metadataLbls: lbls,
+ },
+ selector: fqdnSelec,
+ }
+
+ // Make the FQDN subsystem aware of this selector and fetch identities
+ // that the FQDN subsystem is aware of.
+ //
+ // If the same 'fqdnSelec' is registered twice here from different
+ // goroutines, we do *NOT* need to unregister the second one because
+ // 'fqdnSelec' is just a struct passed by value. The call below doesn't
+ // retain any references/pointers.
+ //
+ // If this is called twice, one of the results will arbitrarily contain
+ // a real slice of ids, while the other will receive nil. We must fold
+ // them together below.
+ sc.localIdentityNotifier.RegisterForIdentityUpdatesLocked(newFQDNSel.selector)
+ selectors := map[api.FQDNSelector]struct{}{newFQDNSel.selector: {}}
+ _, selectorIPMapping := sc.localIdentityNotifier.MapSelectorsToIPsLocked(selectors)
+
+ // Allocate identities corresponding to the slice of IPs identified as
+ // being selected by this FQDN selector above. This could plausibly
+ // happen twice, once with an empty 'ids' slice and once with the real
+ // 'ids' slice. Either way, they are added to the selector that is
+ // stored in 'sc.selectors[]'.
+ currentlyAllocatedIdentities := sc.allocateIdentityMappings(fqdnSelec, selectorIPMapping)
+
+ // Note: No notifications are sent for the existing
+ // identities. Caller must use GetSelections() to get the
+ // current selections after adding a selector. This way the
+ // behavior is the same between the two cases here (selector
+ // is already cached, or is a new one).
+
+ sc.mutex.Lock()
+ // Check whether the selectorCache was updated while 'newFQDNSel' was
+ // being registered without the 'sc.mutex'. If so, use it. Otherwise
+ // we can use the one we just created/configured above.
+ if sel, exists := sc.selectors[key]; exists {
+ newFQDNSel = sel.(*fqdnSelector)
+ } else {
+ sc.selectors[key] = newFQDNSel
+ }
+ identitiesToRelease := newFQDNSel.transferIdentityReferencesToSelector(currentlyAllocatedIdentities)
+ newFQDNSel.updateSelections()
+ added = newFQDNSel.addUser(user)
+ sc.mutex.Unlock()
+
+ sc.releaseIdentityMappings(identitiesToRelease)
+
+ return newFQDNSel, added
+}
+
+// FindCachedIdentitySelector finds the given api.EndpointSelector in the
+// selector cache, returning nil if one can not be found.
+func (sc *SelectorCache) FindCachedIdentitySelector(selector api.EndpointSelector) CachedSelector {
+ key := selector.CachedString()
+ sc.mutex.Lock()
+ idSel := sc.selectors[key]
+ sc.mutex.Unlock()
+ return idSel
+}
+
+// AddIdentitySelector adds the given api.EndpointSelector in to the
+// selector cache. If an identical EndpointSelector has already been
+// cached, the corresponding CachedSelector is returned, otherwise one
+// is created and added to the cache.
+func (sc *SelectorCache) AddIdentitySelector(user CachedSelectionUser, lbls labels.LabelArray, selector api.EndpointSelector) (cachedSelector CachedSelector, added bool) {
+ // The key returned here may be different for equivalent
+ // labelselectors, if the selector's requirements are stored
+ // in different orders. When this happens we'll be tracking
+ // essentially two copies of the same selector.
+ key := selector.CachedString()
+ sc.mutex.Lock()
+ defer sc.mutex.Unlock()
+ idSel, exists := sc.selectors[key]
+ if exists {
+ return idSel, idSel.addUser(user)
+ }
+
+ // Selectors are never modified once a rule is placed in the policy repository,
+ // so no need to deep copy.
+
+ newIDSel := &labelIdentitySelector{
+ selectorManager: selectorManager{
+ key: key,
+ users: make(map[CachedSelectionUser]struct{}),
+ cachedSelections: make(map[identity.NumericIdentity]struct{}),
+ metadataLbls: lbls,
+ },
+ selector: selector,
+ }
+ // check if the selector has a namespace match or requirement
+ if namespaces, ok := selector.GetMatch(labels.LabelSourceK8sKeyPrefix + k8sConst.PodNamespaceLabel); ok {
+ newIDSel.namespaces = namespaces
+ }
+
+ // Add the initial user
+ newIDSel.users[user] = struct{}{}
+
+ // Find all matching identities from the identity cache.
+ for numericID, identity := range sc.idCache {
+ if newIDSel.matches(identity) {
+ newIDSel.cachedSelections[numericID] = struct{}{}
+ }
+ }
+ // Create the immutable slice representation of the selected
+ // numeric identities
+ newIDSel.updateSelections()
+
+ // Note: No notifications are sent for the existing
+ // identities. Caller must use GetSelections() to get the
+ // current selections after adding a selector. This way the
+ // behavior is the same between the two cases here (selector
+ // is already cached, or is a new one).
+
+ sc.selectors[key] = newIDSel
+ return newIDSel, true
+}
+
+// lock must be held
+func (sc *SelectorCache) removeSelectorLocked(selector CachedSelector, user CachedSelectionUser) (identitiesToRelease []identity.NumericIdentity) {
+ key := selector.String()
+ sel, exists := sc.selectors[key]
+ if exists {
+ if sel.removeUser(user, sc.localIdentityNotifier) {
+ delete(sc.selectors, key)
+ identitiesToRelease = sel.fetchIdentityMappings()
+ }
+ }
+ return identitiesToRelease
+}
+
+// RemoveSelector removes CachedSelector for the user.
+func (sc *SelectorCache) RemoveSelector(selector CachedSelector, user CachedSelectionUser) {
+ sc.localIdentityNotifier.Lock()
+ sc.mutex.Lock()
+ identitiesToRelease := sc.removeSelectorLocked(selector, user)
+ sc.mutex.Unlock()
+ sc.localIdentityNotifier.Unlock()
+
+ sc.releaseIdentityMappings(identitiesToRelease)
+}
+
+// RemoveSelectors removes CachedSelectorSlice for the user.
+func (sc *SelectorCache) RemoveSelectors(selectors CachedSelectorSlice, user CachedSelectionUser) {
+ var identitiesToRelease []identity.NumericIdentity
+
+ sc.localIdentityNotifier.Lock()
+ sc.mutex.Lock()
+ for _, selector := range selectors {
+ identities := sc.removeSelectorLocked(selector, user)
+ identitiesToRelease = append(identitiesToRelease, identities...)
+ }
+ sc.mutex.Unlock()
+ sc.localIdentityNotifier.Unlock()
+
+ sc.releaseIdentityMappings(identitiesToRelease)
+}
+
+// ChangeUser changes the CachedSelectionUser that gets the updates
+// on the cached selector.
+func (sc *SelectorCache) ChangeUser(selector CachedSelector, from, to CachedSelectionUser) {
+ key := selector.String()
+ sc.mutex.Lock()
+ idSel, exists := sc.selectors[key]
+ if exists {
+ // Add before remove so that the count does not dip to zero in between,
+ // as this causes FQDN unregistration (if applicable).
+ idSel.addUser(to)
+ // ignoring the return value as we have just added a user above
+ idSel.removeUser(from, sc.localIdentityNotifier)
+ }
+ sc.mutex.Unlock()
+}
+
+// UpdateIdentities propagates identity updates to selectors
+//
+// The caller is responsible for making sure the same identity is not
+// present in both 'added' and 'deleted'.
+//
+// Caller should Wait() on the returned sync.WaitGroup before triggering any
+// policy updates. Policy updates may need Endpoint locks, so this Wait() can
+// deadlock if the caller is holding any endpoint locks.
+func (sc *SelectorCache) UpdateIdentities(added, deleted cache.IdentityCache, wg *sync.WaitGroup) {
+ sc.mutex.Lock()
+ defer sc.mutex.Unlock()
+
+ // Update idCache so that newly added selectors get
+ // prepopulated with all matching numeric identities.
+ for numericID := range deleted {
+ if old, exists := sc.idCache[numericID]; exists {
+ log.WithFields(logrus.Fields{
+ logfields.Identity: numericID,
+ logfields.Labels: old.lbls,
+ }).Debug("UpdateIdentities: Deleting identity")
+ delete(sc.idCache, numericID)
+ } else {
+ log.WithFields(logrus.Fields{
+ logfields.Identity: numericID,
+ }).Warning("UpdateIdentities: Skipping Delete of a non-existing identity")
+ delete(deleted, numericID)
+ }
+ }
+ for numericID, lbls := range added {
+ if old, exists := sc.idCache[numericID]; exists {
+ // Skip if no change. Not skipping if label
+ // order is different, but identity labels are
+ // sorted for the kv-store, so there should
+ // not be too many false negatives.
+ if lbls.Equals(old.lbls) {
+ log.WithFields(logrus.Fields{
+ logfields.Identity: numericID,
+ }).Debug("UpdateIdentities: Skipping add of an existing identical identity")
+ delete(added, numericID)
+ continue
+ }
+ scopedLog := log.WithFields(logrus.Fields{
+ logfields.Identity: numericID,
+ logfields.Labels: old.lbls,
+ logfields.Labels + "(new)": lbls},
+ )
+ msg := "UpdateIdentities: Updating an existing identity"
+ // Warn if any other ID has their labels change, besides local
+ // host. The local host can have its labels change at runtime if
+ // the kube-apiserver is running on the local host, see
+ // ipcache.TriggerLabelInjection().
+ if numericID == identity.ReservedIdentityHost {
+ scopedLog.Debug(msg)
+ } else {
+ scopedLog.Warning(msg)
+ }
+ } else {
+ log.WithFields(logrus.Fields{
+ logfields.Identity: numericID,
+ logfields.Labels: lbls,
+ }).Debug("UpdateIdentities: Adding a new identity")
+ }
+ sc.idCache[numericID] = newIdentity(numericID, lbls)
+ }
+
+ if len(deleted)+len(added) > 0 {
+ // Iterate through all locally used identity selectors and
+ // update the cached numeric identities as required.
+ for _, sel := range sc.selectors {
+ var adds, dels []identity.NumericIdentity
+ switch idSel := sel.(type) {
+ case *labelIdentitySelector:
+ for numericID := range deleted {
+ if _, exists := idSel.cachedSelections[numericID]; exists {
+ dels = append(dels, numericID)
+ delete(idSel.cachedSelections, numericID)
+ }
+ }
+ for numericID := range added {
+ if _, exists := idSel.cachedSelections[numericID]; !exists {
+ if idSel.matches(sc.idCache[numericID]) {
+ adds = append(adds, numericID)
+ idSel.cachedSelections[numericID] = struct{}{}
+ }
+ }
+ }
+ if len(dels)+len(adds) > 0 {
+ idSel.updateSelections()
+ idSel.notifyUsers(sc, adds, dels, wg)
+ }
+ case *fqdnSelector:
+ // This is a no-op right now. We don't encode in the identities
+ // which FQDNs they correspond to.
+ }
+ }
+ }
+}
+
+// RemoveIdentitiesFQDNSelectors removes all identities from being mapped to the
+// set of FQDNSelectors.
+func (sc *SelectorCache) RemoveIdentitiesFQDNSelectors(fqdnSels []api.FQDNSelector, wg *sync.WaitGroup) {
+ identitiesToRelease := []identity.NumericIdentity{}
+ sc.mutex.Lock()
+ noIdentities := []identity.NumericIdentity{}
+
+ for i := range fqdnSels {
+ ids := sc.updateFQDNSelector(fqdnSels[i], noIdentities, wg)
+ identitiesToRelease = append(identitiesToRelease, ids...)
+ }
+ sc.mutex.Unlock()
+ sc.releaseIdentityMappings(identitiesToRelease)
+}
+
+// GetNetsLocked returns the most specific CIDR for an identity. For the "World" identity
+// it returns both IPv4 and IPv6.
+func (sc *SelectorCache) GetNetsLocked(id identity.NumericIdentity) []*net.IPNet {
+ ident, ok := sc.idCache[id]
+ if !ok {
+ return nil
+ }
+ if !ident.computed {
+ log.WithFields(logrus.Fields{
+ logfields.Identity: id,
+ logfields.Labels: ident.lbls,
+ }).Warning("GetNetsLocked: Identity with missing nets!")
+ }
+ return ident.nets
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/trafficdirection/doc.go b/vendor/github.com/cilium/cilium/pkg/policy/trafficdirection/doc.go
new file mode 100644
index 000000000..825578188
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/trafficdirection/doc.go
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Package trafficdirection specifies the directionality of policy in a
+// numeric representation.
+package trafficdirection
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/trafficdirection/trafficdirection.go b/vendor/github.com/cilium/cilium/pkg/policy/trafficdirection/trafficdirection.go
new file mode 100644
index 000000000..9b530cfc5
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/trafficdirection/trafficdirection.go
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package trafficdirection
+
+// TrafficDirection specifies the directionality of policy (ingress or egress).
+type TrafficDirection uint8
+
+const (
+ // Invalid represents an invalid traffic direction.
+ Invalid TrafficDirection = 2
+
+ // Egress represents egress traffic.
+ Egress TrafficDirection = 1
+
+ // Ingress represents ingress traffic.
+ Ingress TrafficDirection = 0
+)
+
+// Uint8 normalizes the TrafficDirection for insertion into BPF maps.
+func (td TrafficDirection) Uint8() uint8 {
+ return uint8(td)
+}
+
+func (td TrafficDirection) String() string {
+ if td == Egress {
+ return "Egress"
+ } else if td == Ingress {
+ return "Ingress"
+ }
+
+ return "Unknown"
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/trigger.go b/vendor/github.com/cilium/cilium/pkg/policy/trigger.go
new file mode 100644
index 000000000..f0410b9ac
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/trigger.go
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package policy
+
+import (
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/cilium/cilium/pkg/endpoint/regeneration"
+ "github.com/cilium/cilium/pkg/metrics"
+ "github.com/cilium/cilium/pkg/option"
+ "github.com/cilium/cilium/pkg/trigger"
+)
+
+// TriggerPolicyUpdates triggers the policy update trigger.
+//
+// To follow what the trigger does, see NewUpdater.
+func (u *Updater) TriggerPolicyUpdates(force bool, reason string) {
+ if force {
+ log.Debugf("Artificially increasing policy revision to enforce policy recalculation")
+ u.repo.BumpRevision()
+ }
+
+ u.TriggerWithReason(reason)
+}
+
+// NewUpdater returns a new Updater instance to handle triggering policy
+// updates ready for use.
+func NewUpdater(r *Repository, regen regenerator) (*Updater, error) {
+ t, err := trigger.NewTrigger(trigger.Parameters{
+ Name: "policy_update",
+ MetricsObserver: &TriggerMetrics{},
+ MinInterval: option.Config.PolicyTriggerInterval,
+ // Triggers policy updates for every local endpoint.
+ // This may be called in a variety of situations: after policy changes,
+ // changes in agent configuration, changes in endpoint labels, and
+ // change of security identities.
+ TriggerFunc: func(reasons []string) {
+ log.Debug("Regenerating all endpoints")
+ reason := strings.Join(reasons, ", ")
+
+ regenerationMetadata := &regeneration.ExternalRegenerationMetadata{
+ Reason: reason,
+ RegenerationLevel: regeneration.RegenerateWithoutDatapath,
+ }
+ regen.RegenerateAllEndpoints(regenerationMetadata)
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+ return &Updater{
+ Trigger: t,
+ repo: r,
+ }, nil
+}
+
+// Updater is responsible for triggering policy updates, in order to perform
+// policy recalculation.
+type Updater struct {
+ *trigger.Trigger
+
+ repo *Repository
+}
+
+type regenerator interface {
+ // RegenerateAllEndpoints should trigger a regeneration of all endpoints.
+ RegenerateAllEndpoints(*regeneration.ExternalRegenerationMetadata) *sync.WaitGroup
+}
+
+// TriggerMetrics handles the metrics for trigger policy recalculations.
+type TriggerMetrics struct{}
+
+func (p *TriggerMetrics) QueueEvent(reason string) {
+ if metrics.TriggerPolicyUpdateTotal.IsEnabled() {
+ metrics.TriggerPolicyUpdateTotal.WithLabelValues(reason).Inc()
+ }
+}
+
+func (p *TriggerMetrics) PostRun(duration, latency time.Duration, folds int) {
+ if metrics.TriggerPolicyUpdateCallDuration.IsEnabled() {
+ metrics.TriggerPolicyUpdateCallDuration.WithLabelValues("duration").Observe(duration.Seconds())
+ metrics.TriggerPolicyUpdateCallDuration.WithLabelValues("latency").Observe(latency.Seconds())
+ }
+ if metrics.TriggerPolicyUpdateFolds.IsEnabled() {
+ metrics.TriggerPolicyUpdateFolds.Set(float64(folds))
+ }
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/utils.go b/vendor/github.com/cilium/cilium/pkg/policy/utils.go
new file mode 100644
index 000000000..fa3c48590
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/utils.go
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package policy
+
+import "github.com/cilium/cilium/pkg/labels"
+
+// JoinPath returns a joined path from a and b.
+func JoinPath(a, b string) string {
+ return a + labels.PathDelimiter + b
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/visibility.go b/vendor/github.com/cilium/cilium/pkg/policy/visibility.go
new file mode 100644
index 000000000..2f106ada3
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/policy/visibility.go
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package policy
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/cilium/cilium/pkg/policy/api"
+ "github.com/cilium/cilium/pkg/u8proto"
+)
+
+var (
+ singleAnnotationRegex = "<(Ingress|Egress)/([1-9][0-9]{1,5})/(TCP|UDP|SCTP|ANY)/([A-Za-z]{3,32})>"
+ annotationRegex = regexp.MustCompile(fmt.Sprintf(`^((%s)(,(%s))*)$`, singleAnnotationRegex, singleAnnotationRegex))
+)
+
+func validateL7ProtocolWithDirection(dir string, proto L7ParserType) error {
+ switch proto {
+ case ParserTypeHTTP:
+ return nil
+ case ParserTypeDNS:
+ if dir == "Egress" {
+ return nil
+ }
+ case ParserTypeKafka:
+ return nil
+ default:
+ return fmt.Errorf("unsupported parser type %s", proto)
+
+ }
+ return fmt.Errorf("%s not allowed with direction %s", proto, dir)
+}
+
+// NewVisibilityPolicy generates the VisibilityPolicy that is encoded in the
+// annotation parameter.
+// Returns an error:
+// - if the annotation does not correspond to the expected
+// format for a visibility annotation.
+// - if there is a conflict between the state encoded in the annotation (e.g.,
+// different L7 protocols for the same L4 port / protocol / traffic direction).
+func NewVisibilityPolicy(anno string) (*VisibilityPolicy, error) {
+ if !annotationRegex.MatchString(anno) {
+ return nil, fmt.Errorf("annotation for proxy visibility did not match expected format %s", annotationRegex.String())
+ }
+
+ nvp := &VisibilityPolicy{
+ Ingress: make(DirectionalVisibilityPolicy),
+ Egress: make(DirectionalVisibilityPolicy),
+ }
+
+ // TODO: look into using regex groups.
+ anSplit := strings.Split(anno, ",")
+ for i := range anSplit {
+ proxyAnnoSplit := strings.Split(anSplit[i], "/")
+ if len(proxyAnnoSplit) != 4 {
+ err := fmt.Errorf("invalid number of fields (%d) in annotation", len(proxyAnnoSplit))
+ return nil, err
+ }
+ // Ingress|Egress
+ // Don't need to validate the content itself, regex already did that.
+ direction := proxyAnnoSplit[0][1:]
+ port := proxyAnnoSplit[1]
+
+ portInt, err := strconv.ParseUint(port, 10, 16)
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse port: %s", err)
+ }
+
+ // Don't need to validate, regex already did that.
+ l4Proto := proxyAnnoSplit[2]
+ u8Prot, err := u8proto.ParseProtocol(l4Proto)
+ if err != nil {
+ return nil, fmt.Errorf("invalid L4 protocol %s", l4Proto)
+ }
+
+ // ANY equates to TCP and UDP in the datapath; the datapath itself does
+ // not support 'Any' protocol paired with a port at L4.
+ var protos []u8proto.U8proto
+ if u8Prot == u8proto.ANY {
+ protos = append(protos, u8proto.TCP)
+ protos = append(protos, u8proto.UDP)
+ protos = append(protos, u8proto.SCTP)
+ } else {
+ protos = append(protos, u8Prot)
+ }
+ // Remove trailing '>'.
+ l7Protocol := L7ParserType(strings.ToLower(proxyAnnoSplit[3][:len(proxyAnnoSplit[3])-1]))
+
+ if err := validateL7ProtocolWithDirection(direction, l7Protocol); err != nil {
+ return nil, err
+ }
+
+ var dvp DirectionalVisibilityPolicy
+ var ingress bool
+ if direction == "Ingress" {
+ dvp = nvp.Ingress
+ ingress = true
+ } else {
+ dvp = nvp.Egress
+ ingress = false
+ }
+
+ for _, prot := range protos {
+ pp := strconv.FormatUint(portInt, 10) + "/" + prot.String()
+ if res, ok := dvp[pp]; ok {
+ if res.Parser != l7Protocol {
+ return nil, fmt.Errorf("duplicate annotations with different L7 protocols %s and %s for %s", res.Parser, l7Protocol, pp)
+ }
+ }
+
+ l7Meta := generateL7AllowAllRules(l7Protocol)
+
+ dvp[pp] = &VisibilityMetadata{
+ Parser: l7Protocol,
+ Port: uint16(portInt),
+ Proto: prot,
+ Ingress: ingress,
+ L7Metadata: l7Meta,
+ }
+ }
+ }
+
+ return nvp, nil
+}
+
+func generateL7AllowAllRules(parser L7ParserType) L7DataMap {
+ var m L7DataMap
+ switch parser {
+ case ParserTypeDNS:
+ m = L7DataMap{}
+ // Create an entry to explicitly allow all at L7 for DNS.
+ emptyL3Selector := &labelIdentitySelector{selectorManager: selectorManager{key: wildcardSelectorKey}, selector: api.WildcardEndpointSelector}
+ m[emptyL3Selector] = &PerSelectorPolicy{
+ L7Rules: api.L7Rules{
+ DNS: []api.PortRuleDNS{
+ {
+ MatchPattern: "*",
+ },
+ },
+ },
+ }
+ }
+ return m
+}
+
+// VisibilityMetadata encodes state about what type of traffic should be
+// redirected to an L7Proxy. Implements the ProxyPolicy interface.
+// TODO: an L4Filter could be composed of this type.
+type VisibilityMetadata struct {
+ // Parser represents the proxy to which traffic should be redirected.
+ Parser L7ParserType
+
+ // Port, in tandem with Proto, signifies which L4 port for which traffic
+ // should be redirected.
+ Port uint16
+
+ // Proto, in tandem with port, signifies which L4 protocol for which traffic
+ // should be redirected.
+ Proto u8proto.U8proto
+
+ // Ingress specifies whether ingress traffic at the given L4 port / protocol
+ // should be redirected to the proxy.
+ Ingress bool
+
+ // L7Metadata encodes optional information what is allowed at L7 for
+ // visibility. Some specific protocol parsers do not need this set for
+ // allowing of traffic (e.g., HTTP), but some do (e.g., DNS).
+ L7Metadata L7DataMap
+}
+
+// DirectionalVisibilityPolicy is a mapping of VisibilityMetadata keyed by
+// L4 Port / L4 Protocol (e.g., 80/TCP) for a given traffic direction (e.g.,
+// ingress or egress). This encodes at which L4 Port / L4 Protocol traffic
+// should be redirected to a given L7 proxy. An empty instance of this type
+// indicates that no traffic should be redirected.
+type DirectionalVisibilityPolicy map[string]*VisibilityMetadata
+
+// VisibilityPolicy represents for both ingress and egress which types of
+// traffic should be redirected to a given L7 proxy.
+type VisibilityPolicy struct {
+ Ingress DirectionalVisibilityPolicy
+ Egress DirectionalVisibilityPolicy
+ Error error
+}
+
+// CopyL7RulesPerEndpoint returns a shallow copy of the L7Metadata of the
+// L4Filter.
+func (v *VisibilityMetadata) CopyL7RulesPerEndpoint() L7DataMap {
+ if v.L7Metadata != nil {
+ return v.L7Metadata.ShallowCopy()
+ }
+ return nil
+}
+
+// GetL7Parser returns the L7ParserType for this VisibilityMetadata.
+func (v *VisibilityMetadata) GetL7Parser() L7ParserType {
+ return v.Parser
+}
+
+// GetIngress returns whether the VisibilityMetadata applies at ingress or
+// egress.
+func (v *VisibilityMetadata) GetIngress() bool {
+ return v.Ingress
+}
+
+// GetPort returns at which port the VisibilityMetadata applies.
+func (v *VisibilityMetadata) GetPort() uint16 {
+ return v.Port
+}
+
+// GetListener returns the optional listener name.
+func (l4 *VisibilityMetadata) GetListener() string {
+ return ""
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/promise/promise.go b/vendor/github.com/cilium/cilium/pkg/promise/promise.go
new file mode 100644
index 000000000..1164b9cf1
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/promise/promise.go
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package promise
+
+import (
+ "context"
+ "sync"
+
+ "github.com/cilium/cilium/pkg/lock"
+)
+
+// A promise for a future value.
+type Promise[T any] interface {
+ // Await blocks until the value is resolved or rejected.
+ Await(context.Context) (T, error)
+}
+
+// Resolver can resolve or reject a promise.
+// These methods are separate from 'Promise' to make it clear where the promise is resolved
+// from.
+type Resolver[T any] interface {
+ // Resolve a promise. Unblocks all Await()s. Future calls of Await()
+ // return the resolved value immediately.
+ //
+ // Only the first call to resolve (or reject) has an effect and
+ // further calls are ignored.
+ Resolve(T)
+
+ // Reject a promise with an error.
+ Reject(error)
+}
+
+// New creates a new promise for value T.
+// Returns a resolver and the promise.
+func New[T any]() (Resolver[T], Promise[T]) {
+ promise := &promise[T]{}
+ promise.cond = sync.NewCond(promise)
+ return promise, promise
+}
+
+const (
+ promiseUnresolved = iota
+ promiseResolved
+ promiseRejected
+)
+
+type promise[T any] struct {
+ lock.Mutex
+ cond *sync.Cond
+ state int
+ value T
+ err error
+}
+
+// Resolve informs all other codepaths who are Await()ing on the received
+// promise that T is now successfully initialized and available for usage.
+//
+// Initialization logic for T should either call Resolve() or Reject(), and
+// must not call these functions more than once.
+func (p *promise[T]) Resolve(value T) {
+ p.Lock()
+ defer p.Unlock()
+ if p.state != promiseUnresolved {
+ return
+ }
+ p.state = promiseResolved
+ p.value = value
+ p.cond.Broadcast()
+}
+
+// Reject informs all other codepaths who are Await()ing on the received
+// promise that T could not be initialized and cannot be used to due the
+// specified error reason.
+//
+// Initialization logic for T should either call Resolve() or Reject(), and
+// must not call these functions more than once.
+func (p *promise[T]) Reject(err error) {
+ p.Lock()
+ defer p.Unlock()
+ if p.state != promiseUnresolved {
+ return
+ }
+ p.state = promiseRejected
+ p.err = err
+ p.cond.Broadcast()
+}
+
+// Await blocks until the promise has been resolved, rejected or context cancelled.
+func (p *promise[T]) Await(ctx context.Context) (value T, err error) {
+	// Fork off a goroutine to wait for cancellation and wake up.
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+	go func() {
+		<-ctx.Done()
+		p.cond.Broadcast()
+	}()
+
+	p.Lock()
+	defer p.Unlock()
+
+	// Wait until resolved or cancelled; ctx is non-nil (from WithCancel).
+	for p.state == promiseUnresolved && ctx.Err() == nil {
+		p.cond.Wait()
+	}
+
+	if ctx.Err() != nil {
+		err = ctx.Err()
+	} else if p.state == promiseResolved {
+		value = p.value
+	} else {
+		err = p.err
+	}
+	return
+}
+
+type wrappedPromise[T any] func(context.Context) (T, error)
+
+func (await wrappedPromise[T]) Await(ctx context.Context) (T, error) {
+ return await(ctx)
+}
+
+// Map transforms the value of a promise with the provided function.
+func Map[A, B any](p Promise[A], transform func(A) B) Promise[B] {
+ return wrappedPromise[B](func(ctx context.Context) (out B, err error) {
+ v, err := p.Await(ctx)
+ if err != nil {
+ return out, err
+ }
+ return transform(v), nil
+ })
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/proxy/accesslog/record.go b/vendor/github.com/cilium/cilium/pkg/proxy/accesslog/record.go
new file mode 100644
index 000000000..1044fc67e
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/proxy/accesslog/record.go
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package accesslog
+
+import (
+ "net"
+ "net/http"
+ "net/url"
+)
+
+// FlowType is the type to indicate the flow direction
+type FlowType string
+
+const (
+ // TypeRequest is a request message
+ TypeRequest FlowType = "Request"
+
+ // TypeResponse is a response to a request
+ TypeResponse FlowType = "Response"
+
+ // TypeSample is a packet sample
+ TypeSample FlowType = "Sample"
+)
+
+// FlowVerdict is the verdict passed on the flow
+type FlowVerdict string
+
+const (
+	// VerdictForwarded indicates that the flow was forwarded
+	VerdictForwarded FlowVerdict = "Forwarded"
+
+	// VerdictDenied indicates that the flow was denied
+	VerdictDenied = "Denied"
+
+	// VerdictError indicates that there was an error processing the flow
+	VerdictError = "Error"
+
+	// VerdictRedirected indicates that the flow was redirected through the proxy
+	VerdictRedirected = "Redirected"
+)
+
+// ObservationPoint is the type used to describe point of observation
+type ObservationPoint string
+
+const (
+ // Ingress indicates event was generated at ingress
+ Ingress ObservationPoint = "Ingress"
+
+ // Egress indicates event was generated at egress
+ Egress ObservationPoint = "Egress"
+)
+
+// IPVersion indicates the flow's IP version
+type IPVersion uint8
+
+const (
+ // VersionIPv4 indicates IPv4
+ VersionIPv4 IPVersion = iota
+ // VersionIPV6 indicates IPv6
+ VersionIPV6
+)
+
+// EndpointInfo contains information about the sending (resp. receiving) endpoint.
+// If the field using this struct is SourceEndpoint, all fields correspond to
+// the sending endpoint, if the field using this struct is DestinationEndpoint,
+// then all fields correspond to the receiving endpoint.
+type EndpointInfo struct {
+ // ID is the endpoint id
+ ID uint64
+
+ // IPv4 is the IPv4 address of the endpoint
+ IPv4 string
+
+ // IPv6 is the IPv6 address of the endpoint
+ IPv6 string
+
+ // Port represents the source point for SourceEndpoint and the
+ // destination port for DestinationEndpoint
+ Port uint16
+
+ // Identity is the security identity of the endpoint
+ Identity uint64
+
+ // Labels is the list of security relevant labels of the endpoint
+ Labels []string
+}
+
+// ServiceInfo contains information about the Kubernetes service
+type ServiceInfo struct {
+ // Name specifies the name of the service
+ Name string
+
+ // IPPort is the IP and transport port of the service
+ IPPort IPPort
+}
+
+// FlowEvent identifies the event type of an L4 log record
+type FlowEvent string
+
+const (
+ // FlowAdded means that this is a new flow
+ FlowAdded FlowEvent = "FlowAdded"
+
+ // FlowRemoved means that a flow has been deleted
+ FlowRemoved FlowEvent = "FlowRemoved"
+)
+
+// DropReason indicates the reason why the flow was dropped
+type DropReason uint16
+
+// TransportProtocol defines layer 4 protocols
+type TransportProtocol uint16
+
+// NodeAddressInfo holds addressing information of the node the agent runs on
+type NodeAddressInfo struct {
+ IPv4 string
+ IPv6 string
+}
+
+// IPPort bundles an IP address and port number
+type IPPort struct {
+ IP string
+ Port uint16
+}
+
+// LogRecord is the structure used to log individual request/response
+// processing events or sampled packets
+type LogRecord struct {
+ // Type is the type of the flow
+ Type FlowType
+
+ // Timestamp is the start of a request, the end of a response, or the time the packet has been sampled,
+ // depending on the flow type
+ Timestamp string
+
+ // NodeAddressInfo contains the IPs of the node where the event was generated
+ NodeAddressInfo NodeAddressInfo
+
+ // ObservationPoint indicates where the flow was observed
+ ObservationPoint ObservationPoint
+
+ // SourceEndpoint is information about the source endpoint, if available
+ SourceEndpoint EndpointInfo
+
+ // DestinationEndpoint is information about the destination endpoint, if available
+ DestinationEndpoint EndpointInfo
+
+ // IPVersion indicates the version of the IP protocol in use
+ IPVersion IPVersion
+
+ // Verdict is the verdict on the flow taken
+ Verdict FlowVerdict
+
+ // Info includes information about the rule that matched or the error
+ // that occurred
+ Info string
+
+ // Metadata is additional arbitrary metadata
+ Metadata []string
+
+ // TransportProtocol identifies the flow's transport layer (layer 4) protocol
+ TransportProtocol TransportProtocol
+
+ // FlowEvent identifies the flow event for L4 log record
+ FlowEvent FlowEvent
+
+ // ServiceInfo identifies the Kubernetes service this flow went through. It is set to
+ // nil if the flow did not go though any service. Note that this field is always set to
+ // nil if ObservationPoint is Ingress since currently Cilium cannot tell at ingress
+ // whether the packet went through a service before.
+ ServiceInfo *ServiceInfo
+
+ // DropReason indicates the reason of the drop. This field is set if and only if
+ // the Verdict field is set to VerdictDenied. Otherwise it's set to nil.
+ DropReason *DropReason
+
+ // The following are the protocol specific parts. Only one of the
+ // following should ever be set. Unused fields will be omitted
+
+ // HTTP contains information for HTTP request/responses
+ HTTP *LogRecordHTTP `json:"HTTP,omitempty"`
+
+ // Kafka contains information for Kafka request/responses
+ Kafka *LogRecordKafka `json:"Kafka,omitempty"`
+
+ // DNS contains information for DNS request/responses
+ DNS *LogRecordDNS `json:"DNS,omitempty"`
+
+ // L7 contains information about generic L7 protocols
+ L7 *LogRecordL7 `json:"L7,omitempty"`
+}
+
+// LogRecordHTTP contains the HTTP specific portion of a log record
+type LogRecordHTTP struct {
+ // Code is the HTTP code being returned
+ Code int
+
+ // Method is the method of the request
+ Method string
+
+ // URL is the URL of the request
+ URL *url.URL
+
+ // Protocol is the HTTP protocol in use
+ Protocol string
+
+ // Headers are all HTTP headers present in the request and response. Request records
+ // contain request headers, while response headers contain response headers and the
+ // 'x-request-id' from the request headers, if any. If response headers already contain
+ // a 'x-request-id' with a different value then both will be included as two separate
+ // entries with the same key.
+ Headers http.Header
+
+ // MissingHeaders are HTTP request headers that were deemed missing from the request
+ MissingHeaders http.Header
+
+ // RejectedHeaders are HTTP request headers that were rejected from the request
+ RejectedHeaders http.Header
+}
+
+// KafkaTopic contains the topic for requests
+type KafkaTopic struct {
+ Topic string `json:"Topic,omitempty"`
+}
+
+// LogRecordKafka contains the Kafka-specific portion of a log record
+type LogRecordKafka struct {
+ // ErrorCode is the Kafka error code being returned
+ ErrorCode int
+
+ // APIVersion of the Kafka api used
+ APIVersion int16
+
+ // APIKey for Kafka message
+ // Reference: https://kafka.apache.org/protocol#protocol_api_keys
+ APIKey string
+
+ // CorrelationID is a user-supplied integer value that will be passed
+ // back with the response
+ CorrelationID int32
+
+ // Topic of the request, currently is a single topic
+ // Note that this string can be empty since not all messages use
+ // Topic. example: LeaveGroup, Heartbeat
+ Topic KafkaTopic
+}
+
+type DNSDataSource string
+
+const (
+ // DNSSourceProxy indicates that the DNS record was created by a proxy
+ // intercepting a DNS request/response.
+ DNSSourceProxy DNSDataSource = "proxy"
+)
+
+// LogRecordDNS contains the DNS specific portion of a log record
+type LogRecordDNS struct {
+ // Query is the name in the original query
+ Query string `json:"Query,omitempty"`
+
+ // IPs are any IPs seen in this response.
+ // This field is filled only for DNS responses with IPs.
+ IPs []net.IP `json:"IPs,omitempty"`
+
+ // TTL is the lowest applicable TTL for this data
+ // This field is filled only for DNS responses.
+ TTL uint32 `json:"TTL,omitempty"`
+
+ // CNAMEs are any CNAME records seen in the response leading from Query
+ // to the IPs.
+ // This field is filled only for DNS responses with CNAMEs to IP data.
+ CNAMEs []string `json:"CNAMEs,omitempty"`
+
+ // ObservationSource represents the source of the data in this LogRecordDNS.
+ // Empty or undefined may indicate older cilium versions, as it is expected
+ // to be filled in.
+ ObservationSource DNSDataSource `json:"ObservationSource,omitempty"`
+
+ // RCode is the response code
+ // defined as per https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml#dns-parameters-6
+ // Use github.com/cilium/dns.RcodeToString map to retrieve string representation
+ RCode int `json:"RCode,omitempty"`
+
+ // QTypes are question types in DNS message
+ // https://www.ietf.org/rfc/rfc1035.txt
+ // Use github.com/cilium/dns.TypeToString map to retrieve string representation
+ QTypes []uint16 `json:"QTypes,omitempty"`
+
+ // AnswerTypes are record types in the answer section
+ // https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml#dns-parameters-4
+ // Use github.com/cilium/dns.TypeToString map to retrieve string representation
+ AnswerTypes []uint16 `json:"AnswerTypes,omitempty"`
+}
+
+// LogRecordL7 contains the generic L7 portion of a log record
+type LogRecordL7 struct {
+ // Proto is the name of the protocol this record represents
+ Proto string `json:"Proto,omitempty"`
+
+ // Fields is a map of key-value pairs describing the protocol
+ Fields map[string]string
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/rand/rand_name.go b/vendor/github.com/cilium/cilium/pkg/rand/rand_name.go
new file mode 100644
index 000000000..77671fdcf
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/rand/rand_name.go
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package rand
+
+import (
+ "time"
+)
+
+// Stolen from:
+// https://stackoverflow.com/questions/22892120/how-to-generate-a-random-string-of-a-fixed-length-in-golang
+
+var (
+ randGen = NewSafeRand(time.Now().UnixNano())
+
+ letterRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
+)
+
+// RandomStringWithPrefix returns a random string of length n + len(prefix) with
+// the given prefix, containing upper- and lowercase runes.
+func RandomStringWithPrefix(prefix string, n int) string {
+ return prefix + RandomStringWithLen(n)
+}
+
+func randomStringFromSliceWithLen(runes []rune, n int) string {
+	b := make([]rune, n)
+	for i := range b {
+		b[i] = runes[randGen.Intn(len(runes))] // draw from the caller-supplied rune set, not letterRunes
+	}
+	return string(b)
+}
+
+// RandomStringWithLen returns a random string of specified length containing
+// upper- and lowercase runes.
+func RandomStringWithLen(n int) string {
+ return randomStringFromSliceWithLen(letterRunes, n)
+}
+
+// RandomString returns a random string with a predefined length of 12.
+func RandomString() string {
+ return RandomStringWithLen(12)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/rand/safe_rand.go b/vendor/github.com/cilium/cilium/pkg/rand/safe_rand.go
new file mode 100644
index 000000000..9829484a1
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/rand/safe_rand.go
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package rand
+
+import (
+ "math/rand"
+
+ "github.com/cilium/cilium/pkg/lock"
+)
+
+// SafeRand is a concurrency-safe source of pseudo-random numbers. The Go
+// stdlib's math/rand.Source is not concurrency-safe. The global source in
+// math/rand would be concurrency safe (due to its internal use of
+// lockedSource), but it is prone to inter-package interference with the PRNG
+// state.
+// Also see https://github.com/cilium/cilium/issues/10988
+type SafeRand struct {
+ mu lock.Mutex
+ r *rand.Rand
+}
+
+func NewSafeRand(seed int64) *SafeRand {
+ return &SafeRand{r: rand.New(rand.NewSource(seed))}
+}
+
+func (sr *SafeRand) Seed(seed int64) {
+ sr.mu.Lock()
+ sr.r.Seed(seed)
+ sr.mu.Unlock()
+}
+
+func (sr *SafeRand) Int63() int64 {
+ sr.mu.Lock()
+ v := sr.r.Int63()
+ sr.mu.Unlock()
+ return v
+}
+
+func (sr *SafeRand) Int63n(n int64) int64 {
+ sr.mu.Lock()
+ v := sr.r.Int63n(n)
+ sr.mu.Unlock()
+ return v
+}
+
+func (sr *SafeRand) Uint32() uint32 {
+ sr.mu.Lock()
+ v := sr.r.Uint32()
+ sr.mu.Unlock()
+ return v
+}
+
+func (sr *SafeRand) Uint64() uint64 {
+ sr.mu.Lock()
+ v := sr.r.Uint64()
+ sr.mu.Unlock()
+ return v
+}
+
+func (sr *SafeRand) Intn(n int) int {
+ sr.mu.Lock()
+ v := sr.r.Intn(n)
+ sr.mu.Unlock()
+ return v
+}
+
+func (sr *SafeRand) Float64() float64 {
+ sr.mu.Lock()
+ v := sr.r.Float64()
+ sr.mu.Unlock()
+ return v
+}
+
+// Perm returns a pseudo-random permutation of the integers in [0, n).
+func (sr *SafeRand) Perm(n int) []int {
+	sr.mu.Lock()
+	v := sr.r.Perm(n)
+	sr.mu.Unlock()
+	return v
+}
+
+func (sr *SafeRand) Shuffle(n int, swap func(i, j int)) {
+ sr.mu.Lock()
+ sr.r.Shuffle(n, swap)
+ sr.mu.Unlock()
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/rate/api_limiter.go b/vendor/github.com/cilium/cilium/pkg/rate/api_limiter.go
new file mode 100644
index 000000000..93c5507f0
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/rate/api_limiter.go
@@ -0,0 +1,897 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package rate
+
+import (
+ "context"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/google/uuid"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sync/semaphore"
+ "golang.org/x/time/rate"
+
+ "github.com/cilium/cilium/pkg/lock"
+ "github.com/cilium/cilium/pkg/logging"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+)
+
+var log = logging.DefaultLogger.WithField(logfields.LogSubsys, "rate")
+
+const (
+ defaultMeanOver = 10
+ defaultDelayedAdjustmentFactor = 0.50
+ defaultMaxAdjustmentFactor = 100.0
+
+	// waitSemaphoreResolution is the maximum resolution of the wait
+	// semaphore; the higher this value, the more accurate the
+	// ParallelRequests requirement is implemented
+	waitSemaphoreResolution = 10000000
+
+ // logUUID is the UUID of the request.
+ logUUID = "uuid"
+ // logAPICallName is the name of the underlying API call, such as
+ // "endpoint-create".
+ logAPICallName = "name"
+ // logProcessingDuration is the time taken to perform the actual underlying
+ // API call such as creating an endpoint or deleting an endpoint. This is
+ // the time between when the request has finished waiting (or being
+ // delayed), to when the underlying action has finished.
+ logProcessingDuration = "processingDuration"
+ // logParallelRequests is the number of allowed parallel requests. See
+ // APILimiter.parallelRequests.
+ logParallelRequests = "parallelRequests"
+ // logMinWaitDuration represents APILimiterParameters.MinWaitDuration.
+ logMinWaitDuration = "minWaitDuration"
+ // logMaxWaitDuration represents APILimiterParameters.MaxWaitDuration.
+ logMaxWaitDuration = "maxWaitDuration"
+ // logMaxWaitDurationLimiter is the actual / calculated maximum threshold
+ // for a request to wait. Any request exceeding this threshold will not be
+ // processed.
+ logMaxWaitDurationLimiter = "maxWaitDurationLimiter"
+ // logWaitDurationLimit is the actual / calculated amount of time
+ // determined by the underlying rate-limiting library that this request
+ // must wait before the rate limiter releases it, so that it can take the
+ // underlying action. See golang.org/x/time/rate.(*Reservation).Delay().
+ logWaitDurationLimit = "waitDurationLimiter"
+ // logWaitDurationTotal is the actual total amount of time that this
+ // request spent waiting to be released by the rate limiter.
+ logWaitDurationTotal = "waitDurationTotal"
+	// logLimit is the rate limit. See APILimiterParameters.RateLimit.
+	logLimit = "limit"
+	// logBurst is the burst rate. See APILimiterParameters.RateBurst.
+	logBurst = "burst"
+ // logTotalDuration is the total time between when the request was first
+ // scheduled (entered the rate limiter) to when it completed processing of
+ // the underlying action. This is the absolute total time of the request
+ // from beginning to end.
+ logTotalDuration = "totalDuration"
+ // logSkipped represents whether the rate limiter will skip rate-limiting
+ // this request. See APILimiterParameters.SkipInitial.
+ logSkipped = "rateLimiterSkipped"
+)
+
+type outcome string
+
+const (
+ outcomeParallelMaxWait outcome = "fail-parallel-wait"
+ outcomeLimitMaxWait outcome = "fail-limit-wait"
+ outcomeReqCancelled outcome = "request-cancelled"
+)
+
+// APILimiter is an extension to x/time/rate.Limiter specifically for Cilium
+// API calls. It allows to automatically adjust the rate, burst and maximum
+// parallel API calls to stay as close as possible to an estimated processing
+// time.
+type APILimiter struct {
+ // name is the name of the API call. This field is immutable after
+ // NewAPILimiter()
+ name string
+
+ // params is the parameters of the limiter. This field is immutable
+ // after NewAPILimiter()
+ params APILimiterParameters
+
+ // metrics points to the metrics implementation provided by the caller
+ // of the APILimiter. This field is immutable after NewAPILimiter()
+ metrics MetricsObserver
+
+ // mutex protects all fields below this line
+ mutex lock.RWMutex
+
+ // meanProcessingDuration is the latest mean processing duration,
+ // calculated based on processingDurations
+ meanProcessingDuration float64
+
+ // processingDurations is the last params.MeanOver processing durations
+ processingDurations []time.Duration
+
+ // meanWaitDuration is the latest mean wait duration, calculated based
+ // on waitDurations
+ meanWaitDuration float64
+
+ // waitDurations is the last params.MeanOver wait durations
+ waitDurations []time.Duration
+
+ // parallelRequests is the currently allowed maximum parallel
+ // requests. This defaults to params.MaxParallel requests and is then
+ // adjusted automatically if params.AutoAdjust is enabled.
+ parallelRequests int
+
+ // adjustmentFactor is the latest adjustment factor. It is the ratio
+ // between params.EstimatedProcessingDuration and
+ // meanProcessingDuration.
+ adjustmentFactor float64
+
+ // limiter is the rate limiter based on params.RateLimit and
+ // params.RateBurst.
+ limiter *rate.Limiter
+
+ // currentRequestsInFlight is the number of parallel API requests
+ // currently in flight
+ currentRequestsInFlight int
+
+ // requestsProcessed is the total number of processed requests
+ requestsProcessed int64
+
+ // requestsScheduled is the total number of scheduled requests
+ requestsScheduled int64
+
+ // parallelWaitSemaphore is the semaphore used to implement
+ // params.MaxParallel. It is initialized with a capacity of
+ // waitSemaphoreResolution and each API request will acquire
+ // waitSemaphoreResolution/params.MaxParallel tokens.
+ parallelWaitSemaphore *semaphore.Weighted
+}
+
+// APILimiterParameters is the configuration of an APILimiter. The structure
+// may not be mutated after it has been passed into NewAPILimiter().
+type APILimiterParameters struct {
+ // EstimatedProcessingDuration is the estimated duration an API call
+ // will take. This value is used if AutoAdjust is enabled to
+ // automatically adjust rate limits to stay as close as possible to the
+ // estimated processing duration.
+ EstimatedProcessingDuration time.Duration
+
+ // AutoAdjust enables automatic adjustment of the values
+ // ParallelRequests, RateLimit, and RateBurst in order to keep the
+ // mean processing duration close to EstimatedProcessingDuration
+ AutoAdjust bool
+
+ // MeanOver is the number of entries to keep in order to calculate the
+ // mean processing and wait duration
+ MeanOver int
+
+ // ParallelRequests is the parallel requests allowed. If AutoAdjust is
+ // enabled, the value will adjust automatically.
+ ParallelRequests int
+
+	// MaxParallelRequests is the maximum parallel requests allowed. If
+	// AutoAdjust is enabled, then the ParallelRequests will never grow
+	// above MaxParallelRequests.
+	MaxParallelRequests int
+
+ // MinParallelRequests is the minimum parallel requests allowed. If
+ // AutoAdjust is enabled, then the ParallelRequests will never fall
+ // below MinParallelRequests.
+ MinParallelRequests int
+
+ // RateLimit is the initial number of API requests allowed per second.
+ // If AutoAdjust is enabled, the value will adjust automatically.
+ RateLimit rate.Limit
+
+ // RateBurst is the initial allowed burst of API requests allowed. If
+ // AutoAdjust is enabled, the value will adjust automatically.
+ RateBurst int
+
+ // MinWaitDuration is the minimum time an API request always has to
+ // wait before the Wait() function returns an error.
+ MinWaitDuration time.Duration
+
+ // MaxWaitDuration is the maximum time an API request is allowed to
+ // wait before the Wait() function returns an error.
+ MaxWaitDuration time.Duration
+
+ // Log enables info logging of processed API requests. This should only
+ // be used for low frequency API calls.
+ Log bool
+
+ // DelayedAdjustmentFactor is percentage of the AdjustmentFactor to be
+ // applied to RateBurst and MaxWaitDuration defined as a value between
+ // 0.0..1.0. This is used to steer a slower reaction of the RateBurst
+ // and ParallelRequests compared to RateLimit.
+ DelayedAdjustmentFactor float64
+
+ // SkipInitial is the number of initial API calls for which to not
+ // apply any rate limiting. This is useful to define a learning phase
+ // in the beginning to allow for auto adjustment before imposing wait
+ // durations and rate limiting on API calls.
+ SkipInitial int
+
+ // MaxAdjustmentFactor is the maximum adjustment factor when AutoAdjust
+ // is enabled. Base values will not adjust more than by this factor.
+ MaxAdjustmentFactor float64
+}
+
+// MergeUserConfig merges the provided user configuration into the existing
+// parameters and returns a new copy.
+func (p APILimiterParameters) MergeUserConfig(config string) (APILimiterParameters, error) {
+ if err := (&p).mergeUserConfig(config); err != nil {
+ return APILimiterParameters{}, err
+ }
+
+ return p, nil
+}
+
+// NewAPILimiter returns a new APILimiter based on the parameters and metrics implementation
+func NewAPILimiter(name string, p APILimiterParameters, metrics MetricsObserver) *APILimiter {
+ if p.MeanOver == 0 {
+ p.MeanOver = defaultMeanOver
+ }
+
+ if p.MinParallelRequests == 0 {
+ p.MinParallelRequests = 1
+ }
+
+ if p.RateBurst == 0 {
+ p.RateBurst = 1
+ }
+
+ if p.DelayedAdjustmentFactor == 0.0 {
+ p.DelayedAdjustmentFactor = defaultDelayedAdjustmentFactor
+ }
+
+ if p.MaxAdjustmentFactor == 0.0 {
+ p.MaxAdjustmentFactor = defaultMaxAdjustmentFactor
+ }
+
+ l := &APILimiter{
+ name: name,
+ params: p,
+ parallelRequests: p.ParallelRequests,
+ parallelWaitSemaphore: semaphore.NewWeighted(waitSemaphoreResolution),
+ metrics: metrics,
+ }
+
+ if p.RateLimit != 0 {
+ l.limiter = rate.NewLimiter(p.RateLimit, p.RateBurst)
+ }
+
+ return l
+}
+
+// NewAPILimiterFromConfig returns a new APILimiter based on user configuration
+func NewAPILimiterFromConfig(name, config string, metrics MetricsObserver) (*APILimiter, error) {
+ p := &APILimiterParameters{}
+
+ if err := p.mergeUserConfig(config); err != nil {
+ return nil, err
+ }
+
+ return NewAPILimiter(name, *p, metrics), nil
+}
+
+func (p *APILimiterParameters) mergeUserConfigKeyValue(key, value string) error {
+ switch strings.ToLower(key) {
+ case "rate-limit":
+ limit, err := parseRate(value)
+ if err != nil {
+ return fmt.Errorf("unable to parse rate %q: %w", value, err)
+ }
+ p.RateLimit = limit
+ case "rate-burst":
+ burst, err := parsePositiveInt(value)
+ if err != nil {
+ return err
+ }
+ p.RateBurst = burst
+ case "min-wait-duration":
+ minWaitDuration, err := time.ParseDuration(value)
+ if err != nil {
+ return fmt.Errorf("unable to parse duration %q: %w", value, err)
+ }
+ p.MinWaitDuration = minWaitDuration
+ case "max-wait-duration":
+ maxWaitDuration, err := time.ParseDuration(value)
+ if err != nil {
+ return fmt.Errorf("unable to parse duration %q: %w", value, err)
+ }
+ p.MaxWaitDuration = maxWaitDuration
+ case "estimated-processing-duration":
+ estProcessingDuration, err := time.ParseDuration(value)
+ if err != nil {
+ return fmt.Errorf("unable to parse duration %q: %w", value, err)
+ }
+ p.EstimatedProcessingDuration = estProcessingDuration
+ case "auto-adjust":
+ v, err := strconv.ParseBool(value)
+ if err != nil {
+ return fmt.Errorf("unable to parse bool %q: %w", value, err)
+ }
+ p.AutoAdjust = v
+ case "parallel-requests":
+ parallel, err := parsePositiveInt(value)
+ if err != nil {
+ return err
+ }
+ p.ParallelRequests = parallel
+ case "min-parallel-requests":
+ minParallel, err := parsePositiveInt(value)
+ if err != nil {
+ return err
+ }
+ p.MinParallelRequests = minParallel
+	case "max-parallel-requests":
+		maxParallel, err := parsePositiveInt(value)
+		if err != nil {
+			return err
+		}
+		p.MaxParallelRequests = maxParallel // parsePositiveInt already returns int
+ case "mean-over":
+ meanOver, err := parsePositiveInt(value)
+ if err != nil {
+ return err
+ }
+ p.MeanOver = meanOver
+ case "log":
+ v, err := strconv.ParseBool(value)
+ if err != nil {
+ return fmt.Errorf("unable to parse bool %q: %w", value, err)
+ }
+ p.Log = v
+ case "delayed-adjustment-factor":
+ delayedAdjustmentFactor, err := strconv.ParseFloat(value, 64)
+ if err != nil {
+ return fmt.Errorf("unable to parse float %q: %w", value, err)
+ }
+ p.DelayedAdjustmentFactor = delayedAdjustmentFactor
+ case "max-adjustment-factor":
+ maxAdjustmentFactor, err := strconv.ParseFloat(value, 64)
+ if err != nil {
+ return fmt.Errorf("unable to parse float %q: %w", value, err)
+ }
+ p.MaxAdjustmentFactor = maxAdjustmentFactor
+ case "skip-initial":
+ skipInitial, err := parsePositiveInt(value)
+ if err != nil {
+ return err
+ }
+ p.SkipInitial = skipInitial
+ default:
+ return fmt.Errorf("unknown rate limiting option %q", key)
+ }
+
+ return nil
+}
+
+func (p *APILimiterParameters) mergeUserConfig(config string) error {
+ tokens := strings.Split(config, ",")
+ for _, token := range tokens {
+ if token == "" {
+ continue
+ }
+
+ t := strings.SplitN(token, ":", 2)
+ if len(t) != 2 {
+ return fmt.Errorf("unable to parse rate limit option %q, must in the form name=option:value[,option:value]", token)
+ }
+
+ if err := p.mergeUserConfigKeyValue(t[0], t[1]); err != nil {
+ return fmt.Errorf("unable to parse rate limit option %q with value %q: %w", t[0], t[1], err)
+ }
+ }
+
+ return nil
+}
+
+func (l *APILimiter) Parameters() APILimiterParameters {
+ return l.params
+}
+
+func (l *APILimiter) delayedAdjustment(current, min, max float64) (n float64) {
+ n = current * l.adjustmentFactor
+ n = current + ((n - current) * l.params.DelayedAdjustmentFactor)
+ if min > 0.0 && n < min {
+ n = min
+ }
+ if max > 0.0 && n > max {
+ n = max
+ }
+ return
+}
+
+func (l *APILimiter) calculateAdjustmentFactor() float64 {
+ f := l.params.EstimatedProcessingDuration.Seconds() / l.meanProcessingDuration
+ if f > l.params.MaxAdjustmentFactor {
+ f = l.params.MaxAdjustmentFactor
+ }
+ if f < 1.0/l.params.MaxAdjustmentFactor {
+ f = 1.0 / l.params.MaxAdjustmentFactor
+ }
+ return f
+}
+
+func (l *APILimiter) adjustmentLimit(newValue, initialValue float64) float64 {
+ return math.Max(initialValue/l.params.MaxAdjustmentFactor, math.Min(initialValue*l.params.MaxAdjustmentFactor, newValue))
+}
+
+func (l *APILimiter) adjustedBurst() int {
+ newBurst := l.delayedAdjustment(float64(l.params.RateBurst), float64(l.params.MinParallelRequests), 0.0)
+ return int(math.Round(l.adjustmentLimit(newBurst, float64(l.params.RateBurst))))
+}
+
+func (l *APILimiter) adjustedLimit() rate.Limit {
+ newLimit := rate.Limit(float64(l.params.RateLimit) * l.adjustmentFactor)
+ return rate.Limit(l.adjustmentLimit(float64(newLimit), float64(l.params.RateLimit)))
+}
+
+func (l *APILimiter) adjustedParallelRequests() int {
+ newParallelRequests := l.delayedAdjustment(float64(l.params.ParallelRequests),
+ float64(l.params.MinParallelRequests), float64(l.params.MaxParallelRequests))
+ return int(l.adjustmentLimit(newParallelRequests, float64(l.params.ParallelRequests)))
+}
+
+func (l *APILimiter) requestFinished(r *limitedRequest, err error) {
+ if r.finished {
+ return
+ }
+
+ r.finished = true
+
+ var processingDuration time.Duration
+ if !r.startTime.IsZero() {
+ processingDuration = time.Since(r.startTime)
+ }
+
+ totalDuration := time.Since(r.scheduleTime)
+
+ scopedLog := log.WithFields(logrus.Fields{
+ logAPICallName: l.name,
+ logUUID: r.uuid,
+ logProcessingDuration: processingDuration,
+ logTotalDuration: totalDuration,
+ logWaitDurationTotal: r.waitDuration,
+ })
+
+ if err != nil {
+ scopedLog = scopedLog.WithError(err)
+ }
+
+ if l.params.Log {
+ scopedLog.Info("API call has been processed")
+ } else {
+ scopedLog.Debug("API call has been processed")
+ }
+
+ if r.waitSemaphoreWeight != 0 {
+ l.parallelWaitSemaphore.Release(r.waitSemaphoreWeight)
+ }
+
+ l.mutex.Lock()
+
+ if !r.startTime.IsZero() {
+ l.requestsProcessed++
+ l.currentRequestsInFlight--
+ }
+
+ // Only auto-adjust ratelimiter using metrics from successful API requests
+ if err == nil {
+ l.processingDurations = append(l.processingDurations, processingDuration)
+ if exceed := len(l.processingDurations) - l.params.MeanOver; exceed > 0 {
+ l.processingDurations = l.processingDurations[exceed:]
+ }
+ l.meanProcessingDuration = calcMeanDuration(l.processingDurations)
+
+ l.waitDurations = append(l.waitDurations, r.waitDuration)
+ if exceed := len(l.waitDurations) - l.params.MeanOver; exceed > 0 {
+ l.waitDurations = l.waitDurations[exceed:]
+ }
+ l.meanWaitDuration = calcMeanDuration(l.waitDurations)
+
+ if l.params.AutoAdjust && l.params.EstimatedProcessingDuration != 0 {
+ l.adjustmentFactor = l.calculateAdjustmentFactor()
+ l.parallelRequests = l.adjustedParallelRequests()
+
+ if l.limiter != nil {
+ l.limiter.SetLimit(l.adjustedLimit())
+
+ newBurst := l.adjustedBurst()
+ l.limiter.SetBurst(newBurst)
+ }
+ }
+ }
+
+ values := MetricsValues{
+ EstimatedProcessingDuration: l.params.EstimatedProcessingDuration.Seconds(),
+ WaitDuration: r.waitDuration,
+ MaxWaitDuration: l.params.MaxWaitDuration,
+ MinWaitDuration: l.params.MinWaitDuration,
+ MeanProcessingDuration: l.meanProcessingDuration,
+ MeanWaitDuration: l.meanWaitDuration,
+ ParallelRequests: l.parallelRequests,
+ CurrentRequestsInFlight: l.currentRequestsInFlight,
+ AdjustmentFactor: l.adjustmentFactor,
+ Error: err,
+ Outcome: string(r.outcome),
+ }
+
+ if l.limiter != nil {
+ values.Limit = l.limiter.Limit()
+ values.Burst = l.limiter.Burst()
+ }
+ l.mutex.Unlock()
+
+ if l.metrics != nil {
+ l.metrics.ProcessedRequest(l.name, values)
+ }
+}
+
+// calcMeanDuration returns the mean duration in seconds
+func calcMeanDuration(durations []time.Duration) float64 {
+ total := 0.0
+ for _, t := range durations {
+ total += t.Seconds()
+ }
+ return total / float64(len(durations))
+}
+
+// LimitedRequest represents a request that is being limited. It is returned
+// by Wait() and the caller of Wait() is responsible to call Done() or Error()
+// when the API call has been processed or resulted in an error. It is safe to
+// call Error() and then Done(). It is not safe to call Done(), Error(), or
+// WaitDuration() concurrently.
+type LimitedRequest interface {
+ Done()
+ Error(err error)
+ WaitDuration() time.Duration
+}
+
+type limitedRequest struct {
+ limiter *APILimiter
+ startTime time.Time
+ scheduleTime time.Time
+ waitDuration time.Duration
+ waitSemaphoreWeight int64
+ uuid string
+ finished bool
+ outcome outcome
+}
+
+// WaitDuration returns the duration the request had to wait
+func (l *limitedRequest) WaitDuration() time.Duration {
+ return l.waitDuration
+}
+
+// Done must be called when the API request has been successfully processed
+func (l *limitedRequest) Done() {
+ l.limiter.requestFinished(l, nil)
+}
+
+// Error must be called when the API request resulted in an error
+func (l *limitedRequest) Error(err error) {
+ l.limiter.requestFinished(l, err)
+}
+
+// Wait blocks until the next API call is allowed to be processed. If the
+// configured MaxWaitDuration is exceeded, an error is returned. On success, a
+// LimitedRequest is returned on which Done() must be called when the API call
+// has completed or Error() if an error occurred.
+func (l *APILimiter) Wait(ctx context.Context) (LimitedRequest, error) {
+ req, err := l.wait(ctx)
+ if err != nil {
+ l.requestFinished(req, err)
+ return nil, err
+ }
+ return req, nil
+}
+
+// wait implements the API rate limiting delaying functionality. Every error
+// message and corresponding log message are documented in
+// Documentation/configuration/api-rate-limiting.rst. If any changes related to
+// errors or log messages are made to this function, please update the
+// aforementioned page as well.
+func (l *APILimiter) wait(ctx context.Context) (req *limitedRequest, err error) {
+ var (
+ limitWaitDuration time.Duration
+ r *rate.Reservation
+ )
+
+ req = &limitedRequest{
+ limiter: l,
+ scheduleTime: time.Now(),
+ uuid: uuid.New().String(),
+ }
+
+ l.mutex.Lock()
+
+ l.requestsScheduled++
+
+ scopedLog := log.WithFields(logrus.Fields{
+ logAPICallName: l.name,
+ logUUID: req.uuid,
+ logParallelRequests: l.parallelRequests,
+ })
+
+ if l.params.MaxWaitDuration > 0 {
+ scopedLog = scopedLog.WithField(logMaxWaitDuration, l.params.MaxWaitDuration)
+ }
+
+ if l.params.MinWaitDuration > 0 {
+ scopedLog = scopedLog.WithField(logMinWaitDuration, l.params.MinWaitDuration)
+ }
+
+ select {
+ case <-ctx.Done():
+ if l.params.Log {
+ scopedLog.Warning("Not processing API request due to cancelled context")
+ }
+ l.mutex.Unlock()
+ req.outcome = outcomeReqCancelled
+ err = fmt.Errorf("request cancelled while waiting for rate limiting slot: %w", ctx.Err())
+ return
+ default:
+ }
+
+ skip := l.params.SkipInitial > 0 && l.requestsScheduled <= int64(l.params.SkipInitial)
+ if skip {
+ scopedLog = scopedLog.WithField(logSkipped, skip)
+ }
+
+ parallelRequests := l.parallelRequests
+ meanProcessingDuration := l.meanProcessingDuration
+ l.mutex.Unlock()
+
+ if l.params.Log {
+ scopedLog.Info("Processing API request with rate limiter")
+ } else {
+ scopedLog.Debug("Processing API request with rate limiter")
+ }
+
+ if skip {
+ goto skipRateLimiter
+ }
+
+ if parallelRequests > 0 {
+ waitCtx := ctx
+ if l.params.MaxWaitDuration > 0 {
+ ctx2, cancel := context.WithTimeout(ctx, l.params.MaxWaitDuration)
+ defer cancel()
+ waitCtx = ctx2
+ }
+ w := int64(waitSemaphoreResolution / parallelRequests)
+ err2 := l.parallelWaitSemaphore.Acquire(waitCtx, w)
+ if err2 != nil {
+ if l.params.Log {
+ scopedLog.WithError(err2).Warning("Not processing API request. Wait duration for maximum parallel requests exceeds maximum")
+ }
+ req.outcome = outcomeParallelMaxWait
+ err = fmt.Errorf("timed out while waiting to be served with %d parallel requests: %w", parallelRequests, err2)
+ return
+ }
+ req.waitSemaphoreWeight = w
+ }
+ req.waitDuration = time.Since(req.scheduleTime)
+
+ l.mutex.Lock()
+ if l.limiter != nil {
+ r = l.limiter.Reserve()
+ limitWaitDuration = r.Delay()
+
+ scopedLog = scopedLog.WithFields(logrus.Fields{
+ logLimit: fmt.Sprintf("%.2f/s", l.limiter.Limit()),
+ logBurst: l.limiter.Burst(),
+ logWaitDurationLimit: limitWaitDuration,
+ logMaxWaitDurationLimiter: l.params.MaxWaitDuration - req.waitDuration,
+ })
+ }
+ l.mutex.Unlock()
+
+ if l.params.MinWaitDuration > 0 && limitWaitDuration < l.params.MinWaitDuration {
+ limitWaitDuration = l.params.MinWaitDuration
+ }
+
+ if (l.params.MaxWaitDuration > 0 && (limitWaitDuration+req.waitDuration) > l.params.MaxWaitDuration) || limitWaitDuration == rate.InfDuration {
+ if l.params.Log {
+ scopedLog.Warning("Not processing API request. Wait duration exceeds maximum")
+ }
+
+ // The rate limiter should only consider a reservation valid if
+ // the request is actually processed. Cancellation of the
+ // reservation should happen before we sleep below.
+ if r != nil {
+ r.Cancel()
+ }
+
+ // Instead of returning immediately, pace the caller by
+ // sleeping for the mean processing duration. This helps
+ // against callers who disrespect 429 error codes and retry
+ // immediately.
+ if meanProcessingDuration > 0.0 {
+ time.Sleep(time.Duration(meanProcessingDuration * float64(time.Second)))
+ }
+
+ req.outcome = outcomeLimitMaxWait
+ err = fmt.Errorf("request would have to wait %v to be served (maximum wait duration: %v)",
+ limitWaitDuration, l.params.MaxWaitDuration-req.waitDuration)
+ return
+ }
+
+ if limitWaitDuration != 0 {
+ select {
+ case <-time.After(limitWaitDuration):
+ case <-ctx.Done():
+ if l.params.Log {
+ scopedLog.Warning("Not processing API request due to cancelled context while waiting")
+ }
+ // The rate limiter should only consider a reservation
+ // valid if the request is actually processed.
+ if r != nil {
+ r.Cancel()
+ }
+
+ req.outcome = outcomeReqCancelled
+ err = fmt.Errorf("request cancelled while waiting for rate limiting slot: %w", ctx.Err())
+ return
+ }
+ }
+
+ req.waitDuration = time.Since(req.scheduleTime)
+
+skipRateLimiter:
+
+ l.mutex.Lock()
+ l.currentRequestsInFlight++
+ l.mutex.Unlock()
+
+ scopedLog = scopedLog.WithField(logWaitDurationTotal, req.waitDuration)
+
+ if l.params.Log {
+ scopedLog.Info("API request released by rate limiter")
+ } else {
+ scopedLog.Debug("API request released by rate limiter")
+ }
+
+ req.startTime = time.Now()
+ return req, nil
+
+}
+
+func parseRate(r string) (rate.Limit, error) {
+ tokens := strings.SplitN(r, "/", 2)
+ if len(tokens) != 2 {
+ return 0, fmt.Errorf("not in the form number/interval")
+ }
+
+ f, err := strconv.ParseFloat(tokens[0], 64)
+ if err != nil {
+ return 0, fmt.Errorf("unable to parse float %q: %w", tokens[0], err)
+ }
+
+ // Reject rates such as 1/1 or 10/10 as it will default to nanoseconds
+ // which is likely unexpected to the user. Require an explicit suffix.
+ if _, err := strconv.ParseInt(string(tokens[1]), 10, 64); err == nil {
+ return 0, fmt.Errorf("interval %q must contain duration suffix", tokens[1])
+ }
+
+ // If duration is provided as "m" or "s", convert it into "1m" or "1s"
+ if _, err := strconv.ParseInt(string(tokens[1][0]), 10, 64); err != nil {
+ tokens[1] = "1" + tokens[1]
+ }
+
+ d, err := time.ParseDuration(tokens[1])
+ if err != nil {
+ return 0, fmt.Errorf("unable to parse duration %q: %w", tokens[1], err)
+ }
+
+ return rate.Limit(f / d.Seconds()), nil
+}
+
+// APILimiterSet is a set of APILimiter indexed by name
+type APILimiterSet struct {
+ limiters map[string]*APILimiter
+ metrics MetricsObserver
+}
+
+// MetricsValues is the snapshot of relevant values to feed into the
+// MetricsObserver
+type MetricsValues struct {
+ WaitDuration time.Duration
+ MinWaitDuration time.Duration
+ MaxWaitDuration time.Duration
+ Outcome string
+ MeanProcessingDuration float64
+ MeanWaitDuration float64
+ EstimatedProcessingDuration float64
+ ParallelRequests int
+ Limit rate.Limit
+ Burst int
+ CurrentRequestsInFlight int
+ AdjustmentFactor float64
+ Error error
+}
+
+// MetricsObserver is the interface that must be implemented to extract metrics
+type MetricsObserver interface {
+ // ProcessedRequest is invoked after invocation of an API call
+ ProcessedRequest(name string, values MetricsValues)
+}
+
+// NewAPILimiterSet creates a new APILimiterSet based on a set of rate limiting
+// configurations and the default configuration. Any rate limiter that is
+// configured in the config OR the defaults will be configured and made
+// available via the Limiter(name) and Wait() function.
+func NewAPILimiterSet(config map[string]string, defaults map[string]APILimiterParameters, metrics MetricsObserver) (*APILimiterSet, error) {
+ limiters := map[string]*APILimiter{}
+
+ for name, p := range defaults {
+ // Merge user config into defaults when provided
+ if userConfig, ok := config[name]; ok {
+ combinedParams, err := p.MergeUserConfig(userConfig)
+ if err != nil {
+ return nil, err
+ }
+ p = combinedParams
+ }
+
+ limiters[name] = NewAPILimiter(name, p, metrics)
+ }
+
+ for name, c := range config {
+ if _, ok := defaults[name]; !ok {
+ l, err := NewAPILimiterFromConfig(name, c, metrics)
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse rate limiting configuration %s=%s: %w", name, c, err)
+ }
+
+ limiters[name] = l
+ }
+ }
+
+ return &APILimiterSet{
+ limiters: limiters,
+ metrics: metrics,
+ }, nil
+}
+
+// Limiter returns the APILimiter with a given name
+func (s *APILimiterSet) Limiter(name string) *APILimiter {
+ return s.limiters[name]
+}
+
+type dummyRequest struct{}
+
+func (d dummyRequest) WaitDuration() time.Duration { return 0 }
+func (d dummyRequest) Done() {}
+func (d dummyRequest) Error(err error) {}
+
+// Wait invokes Wait() on the APILimiter with the given name. If the limiter
+// does not exist, a dummy limiter is used which will not impose any
+// restrictions.
+func (s *APILimiterSet) Wait(ctx context.Context, name string) (LimitedRequest, error) {
+ l, ok := s.limiters[name]
+ if !ok {
+ return dummyRequest{}, nil
+ }
+
+ return l.Wait(ctx)
+}
+
+// parsePositiveInt parses value as an int. It returns an error if value cannot
+// be parsed or is negative.
+func parsePositiveInt(value string) (int, error) {
+ switch i64, err := strconv.ParseInt(value, 10, 64); {
+ case err != nil:
+ return 0, fmt.Errorf("unable to parse positive integer %q: %v", value, err)
+ case i64 < 0:
+ return 0, fmt.Errorf("unable to parse positive integer %q: negative value", value)
+ case i64 > math.MaxInt:
+ return 0, fmt.Errorf("unable to parse positive integer %q: overflow", value)
+ default:
+ return int(i64), nil
+ }
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/rate/doc.go b/vendor/github.com/cilium/cilium/pkg/rate/doc.go
new file mode 100644
index 000000000..b031503aa
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/rate/doc.go
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Package rate provides a rate limiter to rate limit requests that can be
+// burstable, but should only be allowed N per a defined period.
+// This package differs from the "golang.org/x/time/rate" package as it does not
+// implement the token bucket algorithm.
+package rate
diff --git a/vendor/github.com/cilium/cilium/pkg/rate/limiter.go b/vendor/github.com/cilium/cilium/pkg/rate/limiter.go
new file mode 100644
index 000000000..abb7be05e
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/rate/limiter.go
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package rate
+
+import (
+ "context"
+ "fmt"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/sync/semaphore"
+)
+
+// Limiter is used to limit the number of operations done.
+type Limiter struct {
+ semaphore *semaphore.Weighted
+ burst int64
+ currWeights atomic.Int64
+ ticker *time.Ticker
+ cancelFunc context.CancelFunc
+ ctx context.Context
+}
+
+// NewLimiter returns a new Limiter that allows events up to b tokens during
+// the given interval.
+// This Limiter has a different implementation from the 'x/time/rate's Limiter
+// implementation. 'x/time/rate.Limiter' sends a constant stream of updates
+// (at a rate of few dozen events per second) over the period of a N minutes
+// which is the behavior of the token bucket algorithm. It is designed to
+// flatten bursts in a signal to a fixed output rate.
+// This rate.Limiter does the opposite of 'x/time/rate.Limiter'. It takes a
+// somewhat fixed-rate stream of updates and turns it into a stream of
+// controlled small bursts every N minutes.
+func NewLimiter(interval time.Duration, b int64) *Limiter {
+ ticker := time.NewTicker(interval)
+ ctx, cancel := context.WithCancel(context.Background())
+ l := &Limiter{
+ semaphore: semaphore.NewWeighted(b),
+ burst: b,
+ ticker: ticker,
+ ctx: ctx,
+ cancelFunc: cancel,
+ }
+ go func() {
+ for {
+ select {
+ case <-ticker.C:
+ case <-l.ctx.Done():
+ return
+ }
+ currWeights := l.currWeights.Swap(0)
+ l.semaphore.Release(currWeights)
+ }
+ }()
+ return l
+}
+
+// Stop stops the internal components used for the rate limiter logic.
+func (lim *Limiter) Stop() {
+ lim.cancelFunc()
+ lim.ticker.Stop()
+}
+
+func (lim *Limiter) assertAlive() {
+ select {
+ case <-lim.ctx.Done():
+ panic("limiter misuse: Allow / Wait / WaitN called concurrently after Stop")
+ default:
+ }
+}
+
+// Allow is shorthand for AllowN(1).
+func (lim *Limiter) Allow() bool {
+ return lim.AllowN(1)
+}
+
+// AllowN returns true if it's possible to allow n tokens.
+func (lim *Limiter) AllowN(n int64) bool {
+ lim.assertAlive()
+ acq := lim.semaphore.TryAcquire(n)
+ if acq {
+ lim.currWeights.Add(n)
+ return true
+ }
+ return false
+}
+
+// Wait is shorthand for WaitN(ctx, 1).
+func (lim *Limiter) Wait(ctx context.Context) error {
+ return lim.WaitN(ctx, 1)
+}
+
+// WaitN acquires n tokens, blocking until resources are available or ctx is
+// done. On success, returns nil. On failure, returns ctx.Err() and leaves the
+// limiter unchanged.
+//
+// If ctx is already done, WaitN may still succeed without blocking.
+func (lim *Limiter) WaitN(ctx context.Context, n int64) error {
+ lim.assertAlive()
+ if n > lim.burst {
+ return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, lim.burst)
+ }
+ err := lim.semaphore.Acquire(ctx, n)
+ if err != nil {
+ return err
+ }
+ lim.currWeights.Add(n)
+ return nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/rate/metrics/metrics.go b/vendor/github.com/cilium/cilium/pkg/rate/metrics/metrics.go
new file mode 100644
index 000000000..74764ed64
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/rate/metrics/metrics.go
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package metrics
+
+import (
+ "github.com/cilium/cilium/pkg/metrics"
+ "github.com/cilium/cilium/pkg/rate"
+)
+
+func APILimiterObserver() rate.MetricsObserver {
+ return &apiRateLimitingMetrics{}
+}
+
+type apiRateLimitingMetrics struct{}
+
+func (a *apiRateLimitingMetrics) ProcessedRequest(name string, v rate.MetricsValues) {
+ metrics.APILimiterProcessingDuration.WithLabelValues(name, "mean").Set(v.MeanProcessingDuration)
+ metrics.APILimiterProcessingDuration.WithLabelValues(name, "estimated").Set(v.EstimatedProcessingDuration)
+ metrics.APILimiterWaitDuration.WithLabelValues(name, "mean").Set(v.MeanWaitDuration)
+ metrics.APILimiterWaitDuration.WithLabelValues(name, "max").Set(v.MaxWaitDuration.Seconds())
+ metrics.APILimiterWaitDuration.WithLabelValues(name, "min").Set(v.MinWaitDuration.Seconds())
+ metrics.APILimiterRequestsInFlight.WithLabelValues(name, "in-flight").Set(float64(v.CurrentRequestsInFlight))
+ metrics.APILimiterRequestsInFlight.WithLabelValues(name, "limit").Set(float64(v.ParallelRequests))
+ metrics.APILimiterRateLimit.WithLabelValues(name, "limit").Set(float64(v.Limit))
+ metrics.APILimiterRateLimit.WithLabelValues(name, "burst").Set(float64(v.Burst))
+ metrics.APILimiterAdjustmentFactor.WithLabelValues(name).Set(v.AdjustmentFactor)
+
+ if v.Outcome == "" {
+ metrics.APILimiterWaitHistoryDuration.WithLabelValues(name).Observe(v.WaitDuration.Seconds())
+ v.Outcome = metrics.Error2Outcome(v.Error)
+ }
+
+ metrics.APILimiterProcessedRequests.WithLabelValues(name, v.Outcome).Inc()
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/safeio/safeio.go b/vendor/github.com/cilium/cilium/pkg/safeio/safeio.go
new file mode 100644
index 000000000..4eddccc41
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/safeio/safeio.go
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package safeio
+
+import (
+ "fmt"
+ "io"
+)
+
+// ErrLimitReached indicates that ReadAllLimit has
+// reached its limit before completing a full read
+// of the io.Reader.
+var ErrLimitReached = fmt.Errorf("read limit reached")
+
+// ByteSize expresses the size of bytes
+type ByteSize float64
+
+const (
+ _ = iota // ignore first value by assigning to blank identifier
+ // KB is a Kilobyte
+ KB ByteSize = 1 << (10 * iota)
+ // MB is a Megabyte
+ MB
+ // GB is a Gigabyte
+ GB
+ // TB is a Terabyte
+ TB
+ // PB is a Petabyte
+ PB
+ // EB is an Exabyte
+ EB
+ // ZB is a Zettabyte
+ ZB
+ // YB is a Yottabyte
+ YB
+)
+
+// String converts a ByteSize to a string
+func (b ByteSize) String() string {
+ switch {
+ case b >= YB:
+ return fmt.Sprintf("%.1fYB", b/YB)
+ case b >= ZB:
+ return fmt.Sprintf("%.1fZB", b/ZB)
+ case b >= EB:
+ return fmt.Sprintf("%.1fEB", b/EB)
+ case b >= PB:
+ return fmt.Sprintf("%.1fPB", b/PB)
+ case b >= TB:
+ return fmt.Sprintf("%.1fTB", b/TB)
+ case b >= GB:
+ return fmt.Sprintf("%.1fGB", b/GB)
+ case b >= MB:
+ return fmt.Sprintf("%.1fMB", b/MB)
+ case b >= KB:
+ return fmt.Sprintf("%.1fKB", b/KB)
+ }
+ return fmt.Sprintf("%.1fB", b)
+}
+
+// ReadAllLimit reads from r until an error, EOF, or after n bytes and returns
+// the data it read. A successful call returns err == nil, not err == EOF.
+// Because ReadAllLimit is defined to read from r until EOF, it does not
+// treat an EOF from Read as an error to be reported. If the limit is reached
+// ReadAllLimit will return ErrLimitReached as an error.
+func ReadAllLimit(r io.Reader, n ByteSize) ([]byte, error) {
+ limit := int(n + 1)
+ buf, err := io.ReadAll(io.LimitReader(r, int64(limit)))
+ if err != nil {
+ return buf, err
+ }
+ if len(buf) >= limit {
+ return buf[:limit-1], ErrLimitReached
+ }
+ return buf, nil
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/safetime/doc.go b/vendor/github.com/cilium/cilium/pkg/safetime/doc.go
new file mode 100644
index 000000000..11ecf3c5f
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/safetime/doc.go
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Package safetime contains a wrapper function for time.Since to deal with
+// negative durations.
+package safetime
diff --git a/vendor/github.com/cilium/cilium/pkg/safetime/safetime.go b/vendor/github.com/cilium/cilium/pkg/safetime/safetime.go
new file mode 100644
index 000000000..99240e626
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/safetime/safetime.go
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package safetime
+
+import (
+ "runtime"
+ "time"
+
+ "github.com/sirupsen/logrus"
+
+ "github.com/cilium/cilium/pkg/logging/logfields"
+)
+
+// TimeSinceSafe returns the duration since t. If the duration is negative,
+// returns false to indicate the fact.
+//
+// Used to workaround a malfunctioning monotonic clock.
+func TimeSinceSafe(t time.Time, logger *logrus.Entry) (time.Duration, bool) {
+ n := time.Now()
+ d := n.Sub(t)
+
+ if d < 0 {
+ logger = logger.WithFields(logrus.Fields{
+ logfields.StartTime: t,
+ logfields.EndTime: n,
+ logfields.Duration: d,
+ })
+ _, file, line, ok := runtime.Caller(1)
+ if ok {
+ logger = logger.WithFields(logrus.Fields{
+ logfields.Path: file,
+ logfields.Line: line,
+ })
+ }
+ logger.Warn("BUG: negative duration")
+
+ return time.Duration(0), false
+ }
+
+ return d, true
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/source/source.go b/vendor/github.com/cilium/cilium/pkg/source/source.go
new file mode 100644
index 000000000..4156105ab
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/source/source.go
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package source
+
+// Source describes the source of a definition
+type Source string
+
+const (
+ // Unspec is used when the source is unspecified
+ Unspec Source = "unspec"
+
+ // KubeAPIServer is the source used for state which represents the
+ // kube-apiserver, such as the IPs associated with it. This is not to be
+ // confused with the Kubernetes source.
+ // KubeAPIServer state has the strongest ownership and can only be
+ // overwritten by itself.
+ KubeAPIServer Source = "kube-apiserver"
+
+ // Local is the source used for state derived from local agent state.
+ // Local state has the second strongest ownership, behind KubeAPIServer.
+ Local Source = "local"
+
+ // KVStore is the source used for state derived from a key value store.
+ // State in the key value stored takes precedence over orchestration
+ // system state such as Kubernetes.
+ KVStore Source = "kvstore"
+
+ // CustomResource is the source used for state derived from Kubernetes
+ // custom resources
+ CustomResource Source = "custom-resource"
+
+ // Kubernetes is the source used for state derived from Kubernetes
+ Kubernetes Source = "k8s"
+
+ // LocalAPI is the source used for state derived from the API served
+ // locally on the node.
+ LocalAPI Source = "api"
+
+ // Generated is the source used for generated state which can be
+ // overwritten by all other sources, except for restored (and unspec).
+ Generated Source = "generated"
+
+ // Restored is the source used for restored state from data left behind
+ // by the previous agent instance. Can be overwritten by all other
+ // sources (except for unspec).
+ Restored Source = "restored"
+)
+
+// AllowOverwrite returns true if new state from a particular source is allowed
+// to overwrite existing state from another source
+func AllowOverwrite(existing, new Source) bool {
+ switch existing {
+
+ // KubeAPIServer state can only be overwritten by other kube-apiserver
+ // state.
+ case KubeAPIServer:
+ return new == KubeAPIServer
+
+ // Local state can only be overwritten by other local state or
+ // kube-apiserver state.
+ case Local:
+ return new == Local || new == KubeAPIServer
+
+ // KVStore can be overwritten by other kvstore, local state, or
+ // kube-apiserver state.
+ case KVStore:
+ return new == KVStore || new == Local || new == KubeAPIServer
+
+ // Custom-resource state can be overwritten by other CRD, kvstore,
+ // local or kube-apiserver state.
+ case CustomResource:
+ return new == CustomResource || new == KVStore || new == Local || new == KubeAPIServer
+
+ // Kubernetes state can be overwritten by everything except local API,
+ // generated, restored and unspecified state.
+ case Kubernetes:
+ return new != LocalAPI && new != Generated && new != Restored && new != Unspec
+
+ // Local API state can be overwritten by everything except restored,
+ // generated and unspecified state
+ case LocalAPI:
+ return new != Generated && new != Restored && new != Unspec
+
+ // Generated can be overwritten by everything except by Restored and
+ // Unspecified
+ case Generated:
+ return new != Restored && new != Unspec
+
+ // Restored can be overwritten by everything except by Unspecified
+ case Restored:
+ return new != Unspec
+
+ // Unspecified state can be overwritten by everything
+ case Unspec:
+ return true
+ }
+
+ return true
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/spanstat/doc.go b/vendor/github.com/cilium/cilium/pkg/spanstat/doc.go
new file mode 100644
index 000000000..61b9dc812
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/spanstat/doc.go
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Package spanstat provides a mechanism to measure duration of multiple spans
+// and add them up to a total duration
+package spanstat
diff --git a/vendor/github.com/cilium/cilium/pkg/spanstat/spanstat.go b/vendor/github.com/cilium/cilium/pkg/spanstat/spanstat.go
new file mode 100644
index 000000000..e3f7dbb6c
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/spanstat/spanstat.go
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package spanstat
+
+import (
+ "time"
+
+ "github.com/cilium/cilium/pkg/lock"
+ "github.com/cilium/cilium/pkg/logging"
+ "github.com/cilium/cilium/pkg/logging/logfields"
+ "github.com/cilium/cilium/pkg/safetime"
+)
+
+var (
+ subSystem = "spanstat"
+ log = logging.DefaultLogger.WithField(logfields.LogSubsys, subSystem)
+)
+
+// SpanStat measures the total duration of all time spent in between Start()
+// and Stop() calls.
+type SpanStat struct {
+ mutex lock.RWMutex
+ spanStart time.Time
+ successDuration time.Duration
+ failureDuration time.Duration
+}
+
+// Start creates a new SpanStat and starts it
+func Start() *SpanStat {
+ s := &SpanStat{}
+ return s.Start()
+}
+
+// Start starts a new span
+func (s *SpanStat) Start() *SpanStat {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ s.spanStart = time.Now()
+ return s
+}
+
+// EndError calls End() based on the value of err
+func (s *SpanStat) EndError(err error) *SpanStat {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return s.end(err == nil)
+}
+
+// End ends the current span and adds the measured duration to the total
+// cumulated duration, and to the success or failure cumulated duration
+// depending on the given success flag
+func (s *SpanStat) End(success bool) *SpanStat {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ return s.end(success)
+}
+
+// must be called with Lock() held
+func (s *SpanStat) end(success bool) *SpanStat {
+ if !s.spanStart.IsZero() {
+ d, _ := safetime.TimeSinceSafe(s.spanStart, log)
+ if success {
+ s.successDuration += d
+ } else {
+ s.failureDuration += d
+ }
+ }
+ s.spanStart = time.Time{}
+ return s
+}
+
+// Total returns the total duration of all spans measured, including both
+// successes and failures
+func (s *SpanStat) Total() time.Duration {
+ s.mutex.RLock()
+ defer s.mutex.RUnlock()
+ return s.successDuration + s.failureDuration
+}
+
+// SuccessTotal returns the total duration of all successful spans measured
+func (s *SpanStat) SuccessTotal() time.Duration {
+ s.mutex.RLock()
+ defer s.mutex.RUnlock()
+ return s.successDuration
+}
+
+// FailureTotal returns the total duration of all unsuccessful spans measured
+func (s *SpanStat) FailureTotal() time.Duration {
+ s.mutex.RLock()
+ defer s.mutex.RUnlock()
+ return s.failureDuration
+}
+
+// Reset resets the duration measurements
+func (s *SpanStat) Reset() {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ s.successDuration = 0
+ s.failureDuration = 0
+}
+
+// Seconds returns the number of seconds represented by the spanstat. If a span
+// is still open, it is closed first.
+func (s *SpanStat) Seconds() float64 {
+ s.mutex.Lock()
+ defer s.mutex.Unlock()
+ if !s.spanStart.IsZero() {
+ s.end(true)
+ }
+
+ total := s.successDuration + s.failureDuration
+ return total.Seconds()
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/stream/observable.go b/vendor/github.com/cilium/cilium/pkg/stream/observable.go
new file mode 100644
index 000000000..22e96af95
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/stream/observable.go
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// The stream package provides utilities for working with observable streams.
+// Any type that implements the Observable interface can be transformed and
+// consumed with these utilities.
+package stream
+
+import "context"
+
+// Observable defines the Observe method for observing a stream of values.
+//
+// Also see https://reactivex.io/documentation/observable.html for in-depth
+// description of observables.
+//
+// For interactive diagrams see https://rxmarbles.com/.
+type Observable[T any] interface {
+ // Observe a stream of values as long as the given context is valid.
+ // 'next' is called for each item, and finally 'complete' is called
+ // when the stream is complete, or an error has occurred.
+ //
+ // Observable implementations are allowed to call 'next' and 'complete'
+ // from any goroutine, but never concurrently.
+ Observe(ctx context.Context, next func(T), complete func(error))
+}
+
+// FuncObservable implements the Observable interface with a function.
+//
+// This provides a convenient way of creating new observables without having
+// to introduce a new type:
+//
+// var Ones Observable[int] =
+// FuncObservable[int](
+// func(ctx context.Context, next func(int), complete func(error)) {
+// go func() {
+// defer complete(nil)
+// for ctx.Err() == nil {
+// next(1)
+// }
+// }()
+// })
+//
+// versus with a new type:
+//
+// type onesObservable struct {}
+//
+// func (o onesObservable) Observe(ctx context.Context, next func(int), complete func(error)) {
+// go func() {
+// defer complete(nil)
+// for ctx.Err() == nil {
+// next(1)
+// }
+// }()
+// }
+type FuncObservable[T any] func(context.Context, func(T), func(error))
+
+func (f FuncObservable[T]) Observe(ctx context.Context, next func(T), complete func(error)) {
+ f(ctx, next, complete)
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/stream/operators.go b/vendor/github.com/cilium/cilium/pkg/stream/operators.go
new file mode 100644
index 000000000..0f9e802e1
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/stream/operators.go
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package stream
+
+import (
+ "context"
+ "time"
+
+ "golang.org/x/time/rate"
+)
+
+//
+// Operators transform the observable stream.
+//
+
+// Map applies a function onto values of an observable and emits the resulting values.
+//
+// Map(Range(1,4), func(x int) int { return x * 2})
+// => [2,4,6]
+func Map[A, B any](src Observable[A], apply func(A) B) Observable[B] {
+ return FuncObservable[B](
+ func(ctx context.Context, next func(B), complete func(error)) {
+ src.Observe(
+ ctx,
+ func(a A) { next(apply(a)) },
+ complete)
+ })
+}
+
// Filter only emits the values for which the provided predicate returns true.
//
//	Filter(Range(1,4), func(x int) bool { return x%2 == 0 })
//	=> [2]
func Filter[T any](src Observable[T], pred func(T) bool) Observable[T] {
	return FuncObservable[T](
		func(ctx context.Context, next func(T), complete func(error)) {
			src.Observe(
				ctx,
				func(x T) {
					// Only forward items accepted by the predicate;
					// completion passes through unchanged.
					if pred(x) {
						next(x)
					}
				},
				complete)
		})
}
+
+// Reduce takes an initial state, and a function 'reduce' that is called on each element
+// along with a state and returns an observable with a single item: the state produced
+// by the last call to 'reduce'.
+//
+// Reduce(Range(1,4), 0, func(sum, item int) int { return sum + item })
+// => [(0+1+2+3)] => [6]
+func Reduce[Item, Result any](src Observable[Item], init Result, reduce func(Result, Item) Result) Observable[Result] {
+ result := init
+ return FuncObservable[Result](
+ func(ctx context.Context, next func(Result), complete func(error)) {
+ src.Observe(
+ ctx,
+ func(x Item) {
+ result = reduce(result, x)
+ },
+ func(err error) {
+ if err == nil {
+ next(result)
+ }
+ complete(err)
+ })
+ })
+}
+
+// Distinct skips adjacent equal values.
+//
+// Distinct(FromSlice([]int{1,1,2,2,3})
+// => [1,2,3]
+func Distinct[T comparable](src Observable[T]) Observable[T] {
+ var prev T
+ first := true
+ return Filter(src, func(item T) bool {
+ if first {
+ first = false
+ prev = item
+ return true
+ }
+ eq := prev == item
+ prev = item
+ return !eq
+ })
+}
+
// RetryFunc decides whether the processing should be retried given the error
type RetryFunc func(err error) bool

// Retry resubscribes to the observable if it completes with an error.
func Retry[T any](src Observable[T], shouldRetry RetryFunc) Observable[T] {
	return FuncObservable[T](
		func(ctx context.Context, next func(T), complete func(error)) {
			var observe func()
			observe = func() {
				src.Observe(
					ctx,
					next,
					func(err error) {
						// Resubscribe from within the completion
						// callback. 'complete' is propagated only
						// once the stream finishes without error
						// or shouldRetry declines.
						if err != nil && shouldRetry(err) {
							observe()
						} else {
							complete(err)
						}
					})
			}
			observe()
		})
}
+
+// AlwaysRetry always asks for a retry regardless of the error.
+func AlwaysRetry(err error) bool {
+ return true
+}
+
+// BackoffRetry retries with an exponential backoff.
+func BackoffRetry(shouldRetry RetryFunc, minBackoff, maxBackoff time.Duration) RetryFunc {
+ backoff := minBackoff
+ return func(err error) bool {
+ time.Sleep(backoff)
+ backoff *= 2
+ if backoff > maxBackoff {
+ backoff = maxBackoff
+ }
+ return shouldRetry(err)
+ }
+
+}
+
// LimitRetries limits the number of retries with the given retry method.
// e.g. LimitRetries(BackoffRetry(AlwaysRetry, time.Millisecond, time.Second), 5)
//
// The returned RetryFunc closes over the remaining retry count, so it is
// single-use: construct a fresh one per retried subscription.
func LimitRetries(shouldRetry RetryFunc, numRetries int) RetryFunc {
	return func(err error) bool {
		if numRetries <= 0 {
			return false
		}
		numRetries--
		return shouldRetry(err)
	}
}
+
// ToMulticast makes 'src' a multicast observable, e.g. each observer will observe
// the same sequence. Useful for fanning out items to multiple observers from a source
// that is consumed by the act of observing.
//
//	mcast, connect := ToMulticast(FromChannel(values))
//	a := ToSlice(mcast)
//	b := ToSlice(mcast)
//	connect(ctx) // start!
//	=> a == b
//
// 'src' is not subscribed to until 'connect' is called, so observers should
// be registered first.
func ToMulticast[T any](src Observable[T], opts ...MulticastOpt) (mcast Observable[T], connect func(context.Context)) {
	mcast, next, complete := Multicast[T](opts...)
	connect = func(ctx context.Context) {
		src.Observe(ctx, next, complete)
	}
	return mcast, connect
}
+
+// Throttle limits the rate at which items are emitted.
+func Throttle[T any](src Observable[T], ratePerSecond float64, burst int) Observable[T] {
+ return FuncObservable[T](
+ func(ctx context.Context, next func(T), complete func(error)) {
+ limiter := rate.NewLimiter(rate.Limit(ratePerSecond), burst)
+ var limiterErr error
+ subCtx, cancel := context.WithCancel(ctx)
+ src.Observe(
+ subCtx,
+ func(item T) {
+ limiterErr = limiter.Wait(ctx)
+ if limiterErr != nil {
+ cancel()
+ return
+ }
+ next(item)
+ },
+ func(err error) {
+ if limiterErr != nil {
+ complete(limiterErr)
+ } else {
+ complete(err)
+ }
+
+ },
+ )
+ })
+}
+
// Debounce emits an item only after the specified duration has lapsed since
// the previous item was emitted. Only the latest item is emitted.
//
//	In:  a b c d e |->
//	Out: a   d   e |->
func Debounce[T any](src Observable[T], duration time.Duration) Observable[T] {
	return FuncObservable[T](
		func(ctx context.Context, next func(T), complete func(error)) {
			errs := make(chan error, 1)
			items := ToChannel(ctx, src, WithErrorChan(errs))
			go func() {
				defer close(errs)

				timer := time.NewTimer(duration)
				defer timer.Stop()

				timerElapsed := true // Do not delay the first item.
				var latest *T

				for {
					select {
					// Upstream completed (or the context was
					// cancelled): forward the error and stop.
					case err := <-errs:
						complete(err)
						return

					case item := <-items:
						if timerElapsed {
							// Quiet period has passed: emit
							// immediately and start a new one.
							next(item)
							timerElapsed = false
							latest = nil
							timer.Reset(duration)
						} else {
							// Still within the quiet period:
							// remember only the newest item.
							latest = &item
						}

					case <-timer.C:
						if latest != nil {
							// Emit the item buffered during the
							// quiet period and start another one.
							next(*latest)
							latest = nil
							timer.Reset(duration)
						} else {
							// Nothing buffered; the next item
							// may be emitted without delay.
							timerElapsed = true
						}
					}
				}
			}()
		})
}
diff --git a/vendor/github.com/cilium/cilium/pkg/stream/sinks.go b/vendor/github.com/cilium/cilium/pkg/stream/sinks.go
new file mode 100644
index 000000000..23c5ee227
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/stream/sinks.go
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package stream
+
+import (
+ "context"
+ "io"
+ "sync"
+ "sync/atomic"
+)
+
+//
+// Sinks: operators that consume the observable to produce a value.
+//
+
+// First returns the first item from 'src' observable and then cancels
+// the subscription. Blocks until first item is observed or the stream
+// is completed. If the observable completes without emitting items
+// then io.EOF error is returned.
+func First[T any](ctx context.Context, src Observable[T]) (item T, err error) {
+ subCtx, cancel := context.WithCancel(ctx)
+ var taken atomic.Bool
+ errs := make(chan error)
+ src.Observe(subCtx,
+ func(x T) {
+ if !taken.CompareAndSwap(false, true) {
+ return
+ }
+ item = x
+ cancel()
+ },
+ func(err error) {
+ errs <- err
+ close(errs)
+ })
+
+ err = <-errs
+
+ if taken.Load() {
+ // We got the item, ignore any error.
+ err = nil
+ } else if err == nil {
+ // No error and no item => EOF
+ err = io.EOF
+ }
+
+ return
+}
+
// Last returns the last item from 'src' observable. Blocks until
// the stream has been completed. If no items are observed then
// io.EOF error is returned.
func Last[T any](ctx context.Context, src Observable[T]) (item T, err error) {
	errs := make(chan error)
	var taken atomic.Bool
	src.Observe(
		ctx,
		func(x T) {
			// Keep overwriting; the value left behind at completion
			// is the last item.
			item = x
			taken.Store(true)
		},
		func(err error) {
			errs <- err
			close(errs)
		})

	// Block until the stream completes.
	err = <-errs
	if taken.Load() {
		// We got the item, ignore any error.
		err = nil
	} else if err == nil {
		// No error and no item => EOF
		err = io.EOF
	}
	return item, err
}
+
+// ToSlice converts an Observable into a slice.
+//
+// ToSlice(ctx, Range(1,4))
+// => ([]int{1,2,3}, nil)
+func ToSlice[T any](ctx context.Context, src Observable[T]) (items []T, err error) {
+ errs := make(chan error)
+ items = make([]T, 0)
+ src.Observe(
+ ctx,
+ func(item T) {
+ items = append(items, item)
+ },
+ func(err error) {
+ errs <- err
+ close(errs)
+ })
+ return items, <-errs
+}
+
// toChannelOpts holds the configurable behavior of ToChannel.
type toChannelOpts struct {
	bufferSize int
	errorChan  chan error
}

// ToChannelOpt customizes the behavior of ToChannel.
type ToChannelOpt func(*toChannelOpts)

// WithBufferSize sets the buffer size of the channel returned by ToChannel.
func WithBufferSize(n int) ToChannelOpt {
	return func(opts *toChannelOpts) { opts.bufferSize = n }
}

// WithErrorChan asks ToChannel to send completion error to the provided channel.
func WithErrorChan(errCh chan error) ToChannelOpt {
	return func(opts *toChannelOpts) { opts.errorChan = errCh }
}
+
// ToChannel converts an observable into a channel.
// When the provided context is cancelled the underlying subscription is cancelled
// and the channel is closed. To receive completion errors use [WithErrorChan].
//
//	items <- ToChannel(ctx, Range(1,4))
//	a := <- items
//	b := <- items
//	c := <- items
//	_, ok := <- items
//	=> a=1, b=2, c=3, ok=false
func ToChannel[T any](ctx context.Context, src Observable[T], opts ...ToChannelOpt) <-chan T {
	var o toChannelOpts
	for _, opt := range opts {
		opt(&o)
	}
	items := make(chan T, o.bufferSize)
	src.Observe(
		ctx,
		// Blocks when the buffer is full, applying back-pressure to
		// the source.
		func(item T) { items <- item },
		func(err error) {
			close(items)
			// The send is unbuffered from our side: the caller must
			// drain (or buffer) the error channel or this send blocks.
			if o.errorChan != nil {
				o.errorChan <- err
			}
		})
	return items
}
+
+// Discard discards all items from 'src'.
+func Discard[T any](ctx context.Context, src Observable[T]) {
+ src.Observe(ctx,
+ func(item T) {},
+ func(err error) {})
+}
+
// ObserveWithWaitGroup is like Observe(), but adds to a WaitGroup and calls
// Done() when complete.
func ObserveWithWaitGroup[T any](ctx context.Context, wg *sync.WaitGroup, src Observable[T], next func(T), complete func(error)) {
	// Add before subscribing so a Wait() racing with an immediate
	// completion still accounts for this observer.
	wg.Add(1)
	src.Observe(
		ctx,
		next,
		func(err error) {
			// Signal completion to the caller first, then the WaitGroup.
			complete(err)
			wg.Done()
		})
}
diff --git a/vendor/github.com/cilium/cilium/pkg/stream/sources.go b/vendor/github.com/cilium/cilium/pkg/stream/sources.go
new file mode 100644
index 000000000..a9e3f06e2
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/stream/sources.go
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package stream
+
+import (
+ "context"
+
+ "github.com/cilium/cilium/pkg/lock"
+)
+
+// Just creates an observable that emits a single item and completes.
+//
+// xs, err := ToSlice(ctx, Just(1))
+// => xs == []int{1}, err == nil
+func Just[T any](item T) Observable[T] {
+ return FuncObservable[T](
+ func(ctx context.Context, next func(T), complete func(error)) {
+ go func() {
+ if err := ctx.Err(); err != nil {
+ complete(err)
+ } else {
+ next(item)
+ complete(nil)
+ }
+ }()
+ })
+}
+
// Stuck creates an observable that never emits anything and
// just waits for the context to be cancelled.
// Mainly meant for testing.
func Stuck[T any]() Observable[T] {
	return FuncObservable[T](
		func(ctx context.Context, next func(T), complete func(error)) {
			go func() {
				// Block until the caller gives up, then report why.
				<-ctx.Done()
				complete(ctx.Err())
			}()
		})
}
+
// Error creates an observable that fails immediately with given error.
//
//	failErr = errors.New("fail")
//	xs, err := ToSlice(ctx, Error[int](failErr))
//	=> xs == []int{}, err == failErr
func Error[T any](err error) Observable[T] {
	return FuncObservable[T](
		func(ctx context.Context, next func(T), complete func(error)) {
			// Complete asynchronously so Observe itself never blocks.
			go complete(err)
		})
}
+
// Empty creates an "empty" observable that completes immediately.
//
//	xs, err := ToSlice(Empty[int]())
//	=> xs == []int{}, err == nil
func Empty[T any]() Observable[T] {
	// Implemented as an Error observable carrying a nil error.
	return Error[T](nil)
}
+
// FromSlice converts a slice into an Observable.
//
//	ToSlice(ctx, FromSlice([]int{1,2,3})
//	=> []int{1,2,3}
func FromSlice[T any](items []T) Observable[T] {
	// Emit items in chunks to reduce overhead of mutex in ctx.Err().
	const chunkSize = 64
	return FuncObservable[T](
		func(ctx context.Context, next func(T), complete func(error)) {
			go func() {
				for chunk := 0; chunk < len(items); chunk += chunkSize {
					// Check for cancellation once per chunk, not
					// once per item.
					if err := ctx.Err(); err != nil {
						complete(err)
						return
					}
					for i := chunk; i < len(items) && i < chunk+chunkSize; i++ {
						next(items[i])
					}
				}
				complete(nil)
			}()
		})
}
+
// FromChannel creates an observable from a channel. The channel is consumed
// by the first observer; a second subscription sees an already-drained
// channel and completes immediately once it is closed.
//
//	values := make(chan int)
//	go func() {
//		values <- 1
//		values <- 2
//		values <- 3
//		close(values)
//	}()
//	obs := FromChannel(values)
//	xs, err := ToSlice(ctx, obs)
//	=> xs == []int{1,2,3}, err == nil
//
//	xs, err = ToSlice(ctx, obs)
//	=> xs == []int{}, err == nil
func FromChannel[T any](in <-chan T) Observable[T] {
	return FuncObservable[T](
		func(ctx context.Context, next func(T), complete func(error)) {
			go func() {
				done := ctx.Done()
				for {
					select {
					case <-done:
						// Context cancelled: stop without draining.
						complete(ctx.Err())
						return
					case v, ok := <-in:
						if !ok {
							// Channel closed: successful completion.
							complete(nil)
							return
						}
						next(v)
					}
				}
			}()
		})
}
+
// Range creates an observable that emits integers in range from...to-1.
//
//	ToSlice(ctx, Range(1,4)) => []int{1,2,3}
func Range(from, to int) Observable[int] {
	return FuncObservable[int](
		func(ctx context.Context, next func(int), complete func(error)) {
			go func() {
				for i := from; i < to; i++ {
					if ctx.Err() != nil {
						break
					}
					next(i)
				}
				// Reports ctx.Err() when cancelled, nil otherwise.
				complete(ctx.Err())
			}()
		})
}
+
// mcastSubscriber tracks a single observer of a multicast observable.
type mcastSubscriber[T any] struct {
	next     func(T)
	complete func()
}

// MulticastOpt customizes the behavior of a multicast observable.
type MulticastOpt func(o *mcastOpts)

// mcastOpts is the configuration accumulated from MulticastOpt values.
type mcastOpts struct {
	emitLatest bool
}

// apply folds the given options over the receiver and returns the result.
func (o mcastOpts) apply(opts []MulticastOpt) mcastOpts {
	for _, applyOpt := range opts {
		applyOpt(&o)
	}
	return o
}

// Multicast options
var (
	// Emit the latest seen item when subscribing.
	EmitLatest = func(o *mcastOpts) { o.emitLatest = true }
)
+
// Multicast creates an observable that "multicasts" the emitted items to all observers.
// Items emitted while there are no observers are dropped (unless EmitLatest is
// given, in which case the most recent one is replayed to new subscribers).
//
//	mcast, next, complete := Multicast[int]()
//	next(1) // no observers, none receives this
//	sub1 := ToChannel(ctx, mcast, WithBufferSize(10))
//	sub2 := ToChannel(ctx, mcast, WithBufferSize(10))
//	next(2)
//	next(3)
//	complete(nil)
//	=> sub1 == sub2 == [2,3]
//
//	mcast, next, complete = Multicast[int](EmitLatest)
//	next(1)
//	next(2) // "EmitLatest" tells Multicast to keep this
//	x, err := First(ctx, mcast)
//	=> x == 2, err == nil
func Multicast[T any](opts ...MulticastOpt) (mcast Observable[T], next func(T), complete func(error)) {
	var (
		// mu guards all of the state below; next/complete/subscribe
		// all serialize on it.
		mu          lock.Mutex
		subId       int
		subs        = make(map[int]mcastSubscriber[T])
		latestValue T
		completed   bool
		completeErr error
		haveLatest  bool
		opt         = mcastOpts{}.apply(opts)
	)

	next = func(item T) {
		mu.Lock()
		defer mu.Unlock()
		// Items after completion are dropped.
		if completed {
			return
		}
		if opt.emitLatest {
			latestValue = item
			haveLatest = true
		}
		for _, sub := range subs {
			sub.next(item)
		}
	}

	complete = func(err error) {
		mu.Lock()
		defer mu.Unlock()
		completed = true
		completeErr = err
		for _, sub := range subs {
			sub.complete()
		}
		subs = nil
	}

	mcast = FuncObservable[T](
		func(ctx context.Context, subNext func(T), subComplete func(error)) {
			mu.Lock()
			// Late subscription to a completed multicast: complete
			// immediately (asynchronously) with the recorded error.
			if completed {
				mu.Unlock()
				go subComplete(completeErr)
				return
			}

			subCtx, cancel := context.WithCancel(ctx)
			thisId := subId
			subId++
			subs[thisId] = mcastSubscriber[T]{
				subNext,
				cancel,
			}

			// Continue subscribing asynchronously so caller is not blocked.
			// Note: mu is intentionally held across the goroutine start so
			// the latest-value replay below happens before any concurrent
			// 'next' can reach this subscriber.
			go func() {
				if opt.emitLatest && haveLatest {
					subNext(latestValue)
				}
				mu.Unlock()

				// Wait for cancellation by observer, or completion from upstream.
				<-subCtx.Done()

				// Remove the observer and complete.
				var err error
				mu.Lock()
				delete(subs, thisId)
				if completed {
					err = completeErr
				} else {
					err = subCtx.Err()
				}
				mu.Unlock()
				subComplete(err)
			}()
		})

	return
}
diff --git a/vendor/github.com/cilium/cilium/pkg/trigger/doc.go b/vendor/github.com/cilium/cilium/pkg/trigger/doc.go
new file mode 100644
index 000000000..7ca449cd3
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/trigger/doc.go
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
// Package trigger provides a mechanism to trigger actions that need to be
// serialized while providing a non-blocking notification mechanism.
+package trigger
diff --git a/vendor/github.com/cilium/cilium/pkg/trigger/trigger.go b/vendor/github.com/cilium/cilium/pkg/trigger/trigger.go
new file mode 100644
index 000000000..a6d425daa
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/trigger/trigger.go
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package trigger
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/cilium/cilium/pkg/inctimer"
+ "github.com/cilium/cilium/pkg/lock"
+)
+
// MetricsObserver is the interface a metrics collector has to implement in
// order to collect trigger metrics.
type MetricsObserver interface {
	// PostRun is called after a trigger run with the call duration, the
	// latency between 1st queue request and the call run and the number of
	// queued events folded into the last run.
	PostRun(callDuration, latency time.Duration, folds int)

	// QueueEvent is called when Trigger() is called to schedule a trigger
	// run.
	QueueEvent(reason string)
}
+
// Parameters are the user specified parameters.
type Parameters struct {
	// MinInterval is the minimum required interval between invocations of
	// TriggerFunc.
	MinInterval time.Duration

	// TriggerFunc is the function to be called when Trigger() is called
	// while respecting MinInterval and serialization.
	TriggerFunc func(reasons []string)

	// ShutdownFunc is called when the trigger is shut down.
	ShutdownFunc func()

	// MetricsObserver receives trigger metrics; may be nil.
	MetricsObserver MetricsObserver

	// Name is the unique name of the trigger. It must be provided in a
	// format compatible to be used as prometheus name string.
	Name string

	// sleepInterval controls the waiter sleep duration. This parameter is
	// only exposed to tests.
	sleepInterval time.Duration
}
+
+type reasonStack map[string]struct{}
+
+func newReasonStack() reasonStack {
+ return map[string]struct{}{}
+}
+
+func (r reasonStack) add(reason string) {
+ r[reason] = struct{}{}
+}
+
+func (r reasonStack) slice() []string {
+ result := make([]string, len(r))
+ i := 0
+ for reason := range r {
+ result[i] = reason
+ i++
+ }
+ return result
+}
+
// Trigger represents an active trigger logic. Use NewTrigger() to create a
// trigger.
type Trigger struct {
	// protect mutual access of 'trigger' between Trigger() and waiter()
	mutex   lock.Mutex
	trigger bool

	// params are the user specified parameters
	params Parameters

	// lastTrigger is the timestamp of the last invoked trigger
	lastTrigger time.Time

	// wakeupChan is used to wake up the background trigger routine
	wakeupChan chan struct{}

	// closeChan is used to stop the background trigger routine
	closeChan chan struct{}

	// numFolds is the current count of folds that happened into the
	// currently scheduled trigger
	numFolds int

	// foldedReasons is the sum of all unique reasons folded together.
	foldedReasons reasonStack

	// waitStart is the timestamp of the first queue request of the
	// currently pending run; used to compute call latency.
	waitStart time.Time
}
+
// NewTrigger returns a new trigger based on the provided parameters.
// It starts a background goroutine which is stopped via Shutdown().
func NewTrigger(p Parameters) (*Trigger, error) {
	// Default the internal waiter wakeup interval; tests may override it.
	if p.sleepInterval == 0 {
		p.sleepInterval = time.Second
	}

	if p.TriggerFunc == nil {
		return nil, fmt.Errorf("trigger function is nil")
	}

	t := &Trigger{
		params:        p,
		wakeupChan:    make(chan struct{}, 1),
		closeChan:     make(chan struct{}, 1),
		foldedReasons: newReasonStack(),
	}

	// Guarantee that initial trigger has no delay
	if p.MinInterval > time.Duration(0) {
		t.lastTrigger = time.Now().Add(-1 * p.MinInterval)
	}

	go t.waiter()

	return t, nil
}
+
// needsDelay returns whether and how long of a delay is required to fulfill
// MinInterval.
func (t *Trigger) needsDelay() (bool, time.Duration) {
	if t.params.MinInterval == time.Duration(0) {
		return false, 0
	}

	// Negative sleepTime means the next allowed invocation time is still
	// in the future; the delay is its magnitude.
	sleepTime := time.Since(t.lastTrigger.Add(t.params.MinInterval))
	return sleepTime < 0, sleepTime * -1
}
+
// TriggerWithReason triggers the call to TriggerFunc as specified in the
// parameters provided to NewTrigger(), recording 'reason' for folding into
// the next run. It respects MinInterval and ensures that calls to
// TriggerFunc are serialized. This function is non-blocking and will return
// immediately before TriggerFunc is potentially triggered and has completed.
func (t *Trigger) TriggerWithReason(reason string) {
	t.mutex.Lock()
	t.trigger = true
	// First queued request of this pending run: start the latency clock.
	if t.numFolds == 0 {
		t.waitStart = time.Now()
	}
	t.numFolds++
	t.foldedReasons.add(reason)
	t.mutex.Unlock()

	if t.params.MetricsObserver != nil {
		t.params.MetricsObserver.QueueEvent(reason)
	}

	// Non-blocking wakeup: the channel has capacity 1, so a pending
	// wakeup is sufficient and further sends are dropped.
	select {
	case t.wakeupChan <- struct{}{}:
	default:
	}
}
+
// Trigger triggers the call to TriggerFunc as specified in the parameters
// provided to NewTrigger(). It respects MinInterval and ensures that calls to
// TriggerFunc are serialized. This function is non-blocking and will return
// immediately before TriggerFunc is potentially triggered and has completed.
func (t *Trigger) Trigger() {
	// Equivalent to TriggerWithReason with an empty reason.
	t.TriggerWithReason("")
}
+
// Shutdown stops the trigger mechanism.
//
// NOTE(review): calling Shutdown more than once panics (close of a closed
// channel) — callers must ensure it is invoked at most once.
func (t *Trigger) Shutdown() {
	close(t.closeChan)
}
+
// waiter is the background goroutine started by NewTrigger. It serializes
// invocations of TriggerFunc, enforcing MinInterval between runs, and exits
// when closeChan is closed.
func (t *Trigger) waiter() {
	sleepTimer, sleepTimerDone := inctimer.New()
	defer sleepTimerDone()
	for {
		// keep critical section as small as possible
		t.mutex.Lock()
		triggerEnabled := t.trigger
		t.trigger = false
		t.mutex.Unlock()

		// run the trigger function
		if triggerEnabled {
			// Respect MinInterval before invoking TriggerFunc.
			if delayNeeded, delay := t.needsDelay(); delayNeeded {
				time.Sleep(delay)
			}

			// Snapshot and reset the folded state under the lock;
			// TriggerFunc itself runs without holding it.
			t.mutex.Lock()
			t.lastTrigger = time.Now()
			numFolds := t.numFolds
			t.numFolds = 0
			reasons := t.foldedReasons.slice()
			t.foldedReasons = newReasonStack()
			callLatency := time.Since(t.waitStart)
			t.mutex.Unlock()

			beforeTrigger := time.Now()
			t.params.TriggerFunc(reasons)

			if t.params.MetricsObserver != nil {
				callDuration := time.Since(beforeTrigger)
				t.params.MetricsObserver.PostRun(callDuration, callLatency, numFolds)
			}
		}

		// Sleep until woken by Trigger(), by the periodic re-check
		// interval, or by Shutdown().
		select {
		case <-t.wakeupChan:
		case <-sleepTimer.After(t.params.sleepInterval):

		case <-t.closeChan:
			shutdownFunc := t.params.ShutdownFunc
			if shutdownFunc != nil {
				shutdownFunc()
			}
			return
		}
	}
}
diff --git a/vendor/github.com/cilium/cilium/pkg/u8proto/u8proto.go b/vendor/github.com/cilium/cilium/pkg/u8proto/u8proto.go
new file mode 100644
index 000000000..2df035a04
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/u8proto/u8proto.go
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package u8proto
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
// These definitions must contain and be compatible with the string
// values defined for pkg/policy/api/L4Proto

const (
	// ANY represents all protocols.
	ANY    U8proto = 0
	ICMP   U8proto = 1
	TCP    U8proto = 6
	UDP    U8proto = 17
	ICMPv6 U8proto = 58
	SCTP   U8proto = 132
)
+
// protoNames maps a protocol number to its canonical upper-case name.
var protoNames = map[U8proto]string{
	0:   "ANY",
	1:   "ICMP",
	6:   "TCP",
	17:  "UDP",
	58:  "ICMPv6",
	132: "SCTP",
}

// ProtoIDs maps a lower-case protocol name to its protocol number.
var ProtoIDs = map[string]U8proto{
	"all":    0,
	"any":    0,
	"icmp":   1,
	"tcp":    6,
	"udp":    17,
	"icmpv6": 58,
	"sctp":   132,
}

// U8proto is an 8-bit protocol number as used in the IP header.
type U8proto uint8

// String returns the protocol name for known protocols and the decimal
// protocol number otherwise.
func (p U8proto) String() string {
	// Single map lookup instead of the previous check-then-index
	// double lookup.
	if name, ok := protoNames[p]; ok {
		return name
	}
	return strconv.Itoa(int(p))
}

// ParseProtocol parses a case-insensitive protocol name into a protocol
// number. It returns an error for unknown names.
func ParseProtocol(proto string) (U8proto, error) {
	if u, ok := ProtoIDs[strings.ToLower(proto)]; ok {
		return u, nil
	}
	return 0, fmt.Errorf("unknown protocol '%s'", proto)
}
diff --git a/vendor/github.com/cilium/cilium/pkg/wireguard/types/types.go b/vendor/github.com/cilium/cilium/pkg/wireguard/types/types.go
new file mode 100644
index 000000000..a6fb64615
--- /dev/null
+++ b/vendor/github.com/cilium/cilium/pkg/wireguard/types/types.go
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+// Common WireGuard types and constants
+package types
+
const (
	// IfaceName is the name of the WireGuard tunnel device.
	IfaceName = "cilium_wg0"
	// PrivKeyFilename is the name of the WireGuard private key file.
	PrivKeyFilename = "cilium_wg0.key"
	// StaticEncryptKey is used in the IPCache to mark entries for which we
	// want to enable WireGuard encryption.
	StaticEncryptKey = uint8(0xFF)
)
diff --git a/vendor/github.com/cilium/ebpf/.clang-format b/vendor/github.com/cilium/ebpf/.clang-format
new file mode 100644
index 000000000..0ff425760
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/.clang-format
@@ -0,0 +1,25 @@
+---
+Language: Cpp
+BasedOnStyle: LLVM
+AlignAfterOpenBracket: DontAlign
+AlignConsecutiveAssignments: true
+AlignEscapedNewlines: DontAlign
+# mkdocs annotations in source code are written as trailing comments
+# and alignment pushes these really far away from the content.
+AlignTrailingComments: false
+AlwaysBreakBeforeMultilineStrings: true
+AlwaysBreakTemplateDeclarations: false
+AllowAllParametersOfDeclarationOnNextLine: false
+AllowShortFunctionsOnASingleLine: false
+BreakBeforeBraces: Attach
+IndentWidth: 4
+KeepEmptyLinesAtTheStartOfBlocks: false
+TabWidth: 4
+UseTab: ForContinuationAndIndentation
+ColumnLimit: 1000
+# Go compiler comments need to stay unindented.
+CommentPragmas: '^go:.*'
+# linux/bpf.h needs to be included before bpf/bpf_helpers.h for types like __u64
+# and sorting makes this impossible.
+SortIncludes: false
+...
diff --git a/vendor/github.com/cilium/ebpf/.gitignore b/vendor/github.com/cilium/ebpf/.gitignore
new file mode 100644
index 000000000..b46162b8e
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/.gitignore
@@ -0,0 +1,14 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+*.o
+!*_bpf*.o
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
diff --git a/vendor/github.com/cilium/ebpf/.golangci.yaml b/vendor/github.com/cilium/ebpf/.golangci.yaml
new file mode 100644
index 000000000..65f91b910
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/.golangci.yaml
@@ -0,0 +1,13 @@
+---
+linters:
+ disable-all: true
+ enable:
+ - goimports
+ - gosimple
+ - govet
+ - ineffassign
+ - misspell
+ - staticcheck
+ - typecheck
+ - unused
+ - gofmt
diff --git a/vendor/github.com/cilium/ebpf/ARCHITECTURE.md b/vendor/github.com/cilium/ebpf/ARCHITECTURE.md
new file mode 100644
index 000000000..26f555eb7
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/ARCHITECTURE.md
@@ -0,0 +1,92 @@
+Architecture of the library
+===
+
+```mermaid
+graph RL
+ Program --> ProgramSpec --> ELF
+ btf.Spec --> ELF
+ Map --> MapSpec --> ELF
+ Links --> Map & Program
+ ProgramSpec -.-> btf.Spec
+ MapSpec -.-> btf.Spec
+ subgraph Collection
+ Program & Map
+ end
+ subgraph CollectionSpec
+ ProgramSpec & MapSpec & btf.Spec
+ end
+```
+
+ELF
+---
+
+BPF is usually produced by using Clang to compile a subset of C. Clang outputs
+an ELF file which contains program byte code (aka BPF), but also metadata for
+maps used by the program. The metadata follows the conventions set by libbpf
+shipped with the kernel. Certain ELF sections have special meaning
+and contain structures defined by libbpf. Newer versions of clang emit
+additional metadata in [BPF Type Format](#BTF).
+
+The library aims to be compatible with libbpf so that moving from a C toolchain
+to a Go one creates little friction. To that end, the [ELF reader](elf_reader.go)
+is tested against the Linux selftests and avoids introducing custom behaviour
+if possible.
+
+The output of the ELF reader is a `CollectionSpec` which encodes
+all of the information contained in the ELF in a form that is easy to work with
+in Go. The returned `CollectionSpec` should be deterministic: reading the same ELF
+file on different systems must produce the same output.
+As a corollary, any changes that depend on the runtime environment like the
+current kernel version must happen when creating [Objects](#Objects).
+
+Specifications
+---
+
+`CollectionSpec` is a very simple container for `ProgramSpec`, `MapSpec` and
+`btf.Spec`. Avoid adding functionality to it if possible.
+
+`ProgramSpec` and `MapSpec` are blueprints for in-kernel
+objects and contain everything necessary to execute the relevant `bpf(2)`
+syscalls. They refer to `btf.Spec` for type information such as `Map` key and
+value types.
+
+The [asm](asm/) package provides an assembler that can be used to generate
+`ProgramSpec` on the fly.
+
+Objects
+---
+
+`Program` and `Map` are the result of loading specifications into the kernel.
Features that depend on knowledge of the current system (e.g. kernel version)
+are implemented at this point.
+
+Sometimes loading a spec will fail because the kernel is too old, or a feature is not
+enabled. There are multiple ways the library deals with that:
+
+* Fallback: older kernels don't allow naming programs and maps. The library
+ automatically detects support for names, and omits them during load if
+ necessary. This works since name is primarily a debug aid.
+
+* Sentinel error: sometimes it's possible to detect that a feature isn't available.
+ In that case the library will return an error wrapping `ErrNotSupported`.
+ This is also useful to skip tests that can't run on the current kernel.
+
+Once program and map objects are loaded they expose the kernel's low-level API,
+e.g. `NextKey`. Often this API is awkward to use in Go, so there are safer
+wrappers on top of the low-level API, like `MapIterator`. The low-level API is
+useful when our higher-level API doesn't support a particular use case.
+
+Links
+---
+
+Programs can be attached to many different points in the kernel and newer BPF hooks
+tend to use bpf_link to do so. Older hooks unfortunately use a combination of
+syscalls, netlink messages, etc. Adding support for a new link type should not
+pull in large dependencies like netlink, so XDP programs or tracepoints are
+out of scope.
+
+Each bpf_link_type has one corresponding Go type, e.g. `link.tracing` corresponds
+to BPF_LINK_TRACING. In general, these types should be unexported as long as they
+don't export methods outside of the Link interface. Each Go type may have multiple
+exported constructors. For example `AttachTracing` and `AttachLSM` create a
+tracing link, but are distinct functions since they may require different arguments.
diff --git a/vendor/github.com/cilium/ebpf/CODE_OF_CONDUCT.md b/vendor/github.com/cilium/ebpf/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..8e42838c5
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/CODE_OF_CONDUCT.md
@@ -0,0 +1,46 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at nathanjsweet at gmail dot com or i at lmb dot io. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/github.com/cilium/ebpf/CONTRIBUTING.md b/vendor/github.com/cilium/ebpf/CONTRIBUTING.md
new file mode 100644
index 000000000..bf57da939
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/CONTRIBUTING.md
@@ -0,0 +1,48 @@
+# How to contribute
+
+Development is on [GitHub](https://github.com/cilium/ebpf) and contributions in
+the form of pull requests and issues reporting bugs or suggesting new features
+are welcome. Please take a look at [the architecture](ARCHITECTURE.md) to get
+a better understanding of the high-level goals.
+
+## Adding a new feature
+
+1. [Join](https://ebpf.io/slack) the
+[#ebpf-go](https://cilium.slack.com/messages/ebpf-go) channel to discuss your requirements and how the feature can be implemented. The most important part is figuring out how much new exported API is necessary. **The less new API is required the easier it will be to land the feature.**
+2. (*optional*) Create a draft PR if you want to discuss the implementation or have hit a problem. It's fine if this doesn't compile or contains debug statements.
+3. Create a PR that is ready to merge. This must pass CI and have tests.
+
+### API stability
+
+The library doesn't guarantee the stability of its API at the moment.
+
+1. If possible avoid breakage by introducing new API and deprecating the old one
+ at the same time. If an API was deprecated in v0.x it can be removed in v0.x+1.
+2. Breaking API in a way that causes compilation failures is acceptable but must
+ have good reasons.
+3. Changing the semantics of the API without causing compilation failures is
+ heavily discouraged.
+
+## Running the tests
+
+Many of the tests require privileges to set resource limits and load eBPF code.
+The easiest way to obtain these is to run the tests with `sudo`.
+
+To test the current package with your local kernel you can simply run:
+```
+go test -exec sudo ./...
+```
+
+To test the current package with a different kernel version you can use the [run-tests.sh](run-tests.sh) script.
+It requires [virtme](https://github.com/amluto/virtme) and qemu to be installed.
+
+Examples:
+
+```bash
+# Run all tests on a 5.4 kernel
+./run-tests.sh 5.4
+
+# Run a subset of tests:
+./run-tests.sh 5.4 ./link
+```
+
diff --git a/vendor/github.com/cilium/ebpf/LICENSE b/vendor/github.com/cilium/ebpf/LICENSE
new file mode 100644
index 000000000..c637ae99c
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/LICENSE
@@ -0,0 +1,23 @@
+MIT License
+
+Copyright (c) 2017 Nathan Sweet
+Copyright (c) 2018, 2019 Cloudflare
+Copyright (c) 2019 Authors of Cilium
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/cilium/ebpf/MAINTAINERS.md b/vendor/github.com/cilium/ebpf/MAINTAINERS.md
new file mode 100644
index 000000000..a56a03e39
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/MAINTAINERS.md
@@ -0,0 +1,3 @@
+# Maintainers
+
+Maintainers can be found in the [Cilium Maintainers file](https://github.com/cilium/community/blob/main/roles/Maintainers.md)
diff --git a/vendor/github.com/cilium/ebpf/Makefile b/vendor/github.com/cilium/ebpf/Makefile
new file mode 100644
index 000000000..0fa8cdc52
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/Makefile
@@ -0,0 +1,114 @@
+# The development version of clang is distributed as the 'clang' binary,
+# while stable/released versions have a version number attached.
+# Pin the default clang to a stable version.
+CLANG ?= clang-17
+STRIP ?= llvm-strip-17
+OBJCOPY ?= llvm-objcopy-17
+CFLAGS := -O2 -g -Wall -Werror $(CFLAGS)
+
+CI_KERNEL_URL ?= https://github.com/cilium/ci-kernels/raw/master/
+
+# Obtain an absolute path to the directory of the Makefile.
+# Assume the Makefile is in the root of the repository.
+REPODIR := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
+UIDGID := $(shell stat -c '%u:%g' ${REPODIR})
+
+# Prefer podman if installed, otherwise use docker.
+# Note: Setting the var at runtime will always override.
+CONTAINER_ENGINE ?= $(if $(shell command -v podman), podman, docker)
+CONTAINER_RUN_ARGS ?= $(if $(filter ${CONTAINER_ENGINE}, podman), --log-driver=none, --user "${UIDGID}")
+
+IMAGE := $(shell cat ${REPODIR}/testdata/docker/IMAGE)
+VERSION := $(shell cat ${REPODIR}/testdata/docker/VERSION)
+
+TARGETS := \
+ testdata/loader-clang-11 \
+ testdata/loader-clang-14 \
+ testdata/loader-$(CLANG) \
+ testdata/manyprogs \
+ testdata/btf_map_init \
+ testdata/invalid_map \
+ testdata/raw_tracepoint \
+ testdata/invalid_map_static \
+ testdata/invalid_btf_map_init \
+ testdata/strings \
+ testdata/freplace \
+ testdata/fentry_fexit \
+ testdata/iproute2_map_compat \
+ testdata/map_spin_lock \
+ testdata/subprog_reloc \
+ testdata/fwd_decl \
+ testdata/kconfig \
+ testdata/kconfig_config \
+ testdata/kfunc \
+ testdata/invalid-kfunc \
+ testdata/kfunc-kmod \
+ testdata/constants \
+ btf/testdata/relocs \
+ btf/testdata/relocs_read \
+ btf/testdata/relocs_read_tgt \
+ cmd/bpf2go/testdata/minimal
+
+.PHONY: all clean container-all container-shell generate
+
+.DEFAULT_TARGET = container-all
+
+# Build all ELF binaries using a containerized LLVM toolchain.
+container-all:
+ +${CONTAINER_ENGINE} run --rm -t ${CONTAINER_RUN_ARGS} \
+ -v "${REPODIR}":/ebpf -w /ebpf --env MAKEFLAGS \
+ --env HOME="/tmp" \
+ --env BPF2GO_CC="$(CLANG)" \
+ --env BPF2GO_FLAGS="-fdebug-prefix-map=/ebpf=. $(CFLAGS)" \
+ "${IMAGE}:${VERSION}" \
+ make all
+
+# (debug) Drop the user into a shell inside the container as root.
+# Set BPF2GO_ envs to make 'make generate' just work.
+container-shell:
+ ${CONTAINER_ENGINE} run --rm -ti \
+ -v "${REPODIR}":/ebpf -w /ebpf \
+ --env BPF2GO_CC="$(CLANG)" \
+ --env BPF2GO_FLAGS="-fdebug-prefix-map=/ebpf=. $(CFLAGS)" \
+ "${IMAGE}:${VERSION}"
+
+clean:
+ find "$(CURDIR)" -name "*.elf" -delete
+ find "$(CURDIR)" -name "*.o" -delete
+
+format:
+ find . -type f -name "*.c" | xargs clang-format -i
+
+all: format $(addsuffix -el.elf,$(TARGETS)) $(addsuffix -eb.elf,$(TARGETS)) generate
+ ln -srf testdata/loader-$(CLANG)-el.elf testdata/loader-el.elf
+ ln -srf testdata/loader-$(CLANG)-eb.elf testdata/loader-eb.elf
+
+generate:
+ go generate ./...
+
+testdata/loader-%-el.elf: testdata/loader.c
+ $* $(CFLAGS) -target bpfel -c $< -o $@
+ $(STRIP) -g $@
+
+testdata/loader-%-eb.elf: testdata/loader.c
+ $* $(CFLAGS) -target bpfeb -c $< -o $@
+ $(STRIP) -g $@
+
+%-el.elf: %.c
+ $(CLANG) $(CFLAGS) -target bpfel -c $< -o $@
+ $(STRIP) -g $@
+
+%-eb.elf : %.c
+ $(CLANG) $(CFLAGS) -target bpfeb -c $< -o $@
+ $(STRIP) -g $@
+
+.PHONY: generate-btf
+generate-btf: KERNEL_VERSION?=6.1.29
+generate-btf:
+ $(eval TMP := $(shell mktemp -d))
+ curl -fL "$(CI_KERNEL_URL)/linux-$(KERNEL_VERSION)-amd64.tgz" -o "$(TMP)/linux.tgz"
+ tar xvf "$(TMP)/linux.tgz" -C "$(TMP)" --strip-components=2 ./boot/vmlinuz ./lib/modules
+ /lib/modules/$(shell uname -r)/build/scripts/extract-vmlinux "$(TMP)/vmlinuz" > "$(TMP)/vmlinux"
+ $(OBJCOPY) --dump-section .BTF=/dev/stdout "$(TMP)/vmlinux" /dev/null | gzip > "btf/testdata/vmlinux.btf.gz"
+ find "$(TMP)/modules" -type f -name bpf_testmod.ko -exec $(OBJCOPY) --dump-section .BTF="btf/testdata/btf_testmod.btf" {} /dev/null \;
+ $(RM) -r "$(TMP)"
diff --git a/vendor/github.com/cilium/ebpf/README.md b/vendor/github.com/cilium/ebpf/README.md
new file mode 100644
index 000000000..81235a69d
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/README.md
@@ -0,0 +1,82 @@
+# eBPF
+
+[![PkgGoDev](https://pkg.go.dev/badge/github.com/cilium/ebpf)](https://pkg.go.dev/github.com/cilium/ebpf)
+
+![HoneyGopher](docs/ebpf/ebpf-go.png)
+
+ebpf-go is a pure Go library that provides utilities for loading, compiling, and
+debugging eBPF programs. It has minimal external dependencies and is intended to
+be used in long running processes.
+
+See [ebpf.io](https://ebpf.io) for complementary projects from the wider eBPF
+ecosystem.
+
+## Getting Started
+
+A small collection of Go and eBPF programs that serve as examples for building
+your own tools can be found under [examples/](examples/).
+
+[Contributions](CONTRIBUTING.md) are highly encouraged, as they highlight certain use cases of
+eBPF and the library, and help shape the future of the project.
+
+## Getting Help
+
+The community actively monitors our [GitHub Discussions](https://github.com/cilium/ebpf/discussions) page.
+Please search for existing threads before starting a new one. Refrain from
+opening issues on the bug tracker if you're just starting out or if you're not
+sure if something is a bug in the library code.
+
+Alternatively, [join](https://ebpf.io/slack) the
+[#ebpf-go](https://cilium.slack.com/messages/ebpf-go) channel on Slack if you
+have other questions regarding the project. Note that this channel is ephemeral
+and has its history erased past a certain point, which is less helpful for
+others running into the same problem later.
+
+## Packages
+
+This library includes the following packages:
+
+* [asm](https://pkg.go.dev/github.com/cilium/ebpf/asm) contains a basic
+ assembler, allowing you to write eBPF assembly instructions directly
+ within your Go code. (You don't need to use this if you prefer to write your eBPF program in C.)
+* [cmd/bpf2go](https://pkg.go.dev/github.com/cilium/ebpf/cmd/bpf2go) allows
+ compiling and embedding eBPF programs written in C within Go code. As well as
+ compiling the C code, it auto-generates Go code for loading and manipulating
+ the eBPF program and map objects.
+* [link](https://pkg.go.dev/github.com/cilium/ebpf/link) allows attaching eBPF
+ to various hooks
+* [perf](https://pkg.go.dev/github.com/cilium/ebpf/perf) allows reading from a
+ `PERF_EVENT_ARRAY`
+* [ringbuf](https://pkg.go.dev/github.com/cilium/ebpf/ringbuf) allows reading from a
+ `BPF_MAP_TYPE_RINGBUF` map
+* [features](https://pkg.go.dev/github.com/cilium/ebpf/features) implements the equivalent
+ of `bpftool feature probe` for discovering BPF-related kernel features using native Go.
+* [rlimit](https://pkg.go.dev/github.com/cilium/ebpf/rlimit) provides a convenient API to lift
+ the `RLIMIT_MEMLOCK` constraint on kernels before 5.11.
+* [btf](https://pkg.go.dev/github.com/cilium/ebpf/btf) allows reading the BPF Type Format.
+
+## Requirements
+
+* A version of Go that is [supported by
+ upstream](https://golang.org/doc/devel/release.html#policy)
+* Linux >= 4.9. CI is run against kernel.org LTS releases. 4.4 should work but is
+ not tested against.
+
+## Regenerating Testdata
+
+Run `make` in the root of this repository to rebuild testdata in all
+subpackages. This requires Docker, as it relies on a standardized build
+environment to keep the build output stable.
+
+It is possible to regenerate data using Podman by overriding the `CONTAINER_*`
+variables: `CONTAINER_ENGINE=podman CONTAINER_RUN_ARGS= make`.
+
+The toolchain image build files are kept in [testdata/docker/](testdata/docker/).
+
+## License
+
+MIT
+
+### eBPF Gopher
+
+The eBPF honeygopher is based on the Go gopher designed by Renee French.
diff --git a/vendor/github.com/cilium/ebpf/asm/alu.go b/vendor/github.com/cilium/ebpf/asm/alu.go
new file mode 100644
index 000000000..7dc56204b
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/asm/alu.go
@@ -0,0 +1,149 @@
+package asm
+
+//go:generate go run golang.org/x/tools/cmd/stringer@latest -output alu_string.go -type=Source,Endianness,ALUOp
+
+// Source of ALU / ALU64 / Branch operations
+//
+// msb lsb
+// +----+-+---+
+// |op |S|cls|
+// +----+-+---+
+type Source uint8
+
+const sourceMask OpCode = 0x08
+
+// Source bitmask
+const (
+ // InvalidSource is returned by getters when invoked
+ // on non ALU / branch OpCodes.
+ InvalidSource Source = 0xff
+ // ImmSource src is from constant
+ ImmSource Source = 0x00
+ // RegSource src is from register
+ RegSource Source = 0x08
+)
+
+// The Endianness of a byte swap instruction.
+type Endianness uint8
+
+const endianMask = sourceMask
+
+// Endian flags
+const (
+ InvalidEndian Endianness = 0xff
+ // Convert to little endian
+ LE Endianness = 0x00
+ // Convert to big endian
+ BE Endianness = 0x08
+)
+
+// ALUOp are ALU / ALU64 operations
+//
+// msb lsb
+// +----+-+---+
+// |OP |s|cls|
+// +----+-+---+
+type ALUOp uint8
+
+const aluMask OpCode = 0xf0
+
+const (
+ // InvalidALUOp is returned by getters when invoked
+ // on non ALU OpCodes
+ InvalidALUOp ALUOp = 0xff
+ // Add - addition
+ Add ALUOp = 0x00
+ // Sub - subtraction
+ Sub ALUOp = 0x10
+ // Mul - multiplication
+ Mul ALUOp = 0x20
+ // Div - division
+ Div ALUOp = 0x30
+ // Or - bitwise or
+ Or ALUOp = 0x40
+ // And - bitwise and
+ And ALUOp = 0x50
+ // LSh - bitwise shift left
+ LSh ALUOp = 0x60
+ // RSh - bitwise shift right
+ RSh ALUOp = 0x70
+ // Neg - sign/unsign signing bit
+ Neg ALUOp = 0x80
+ // Mod - modulo
+ Mod ALUOp = 0x90
+ // Xor - bitwise xor
+ Xor ALUOp = 0xa0
+ // Mov - move value from one place to another
+ Mov ALUOp = 0xb0
+ // ArSh - arithmetic shift
+ ArSh ALUOp = 0xc0
+ // Swap - endian conversions
+ Swap ALUOp = 0xd0
+)
+
+// HostTo converts from host to another endianness.
+func HostTo(endian Endianness, dst Register, size Size) Instruction {
+ var imm int64
+ switch size {
+ case Half:
+ imm = 16
+ case Word:
+ imm = 32
+ case DWord:
+ imm = 64
+ default:
+ return Instruction{OpCode: InvalidOpCode}
+ }
+
+ return Instruction{
+ OpCode: OpCode(ALUClass).SetALUOp(Swap).SetSource(Source(endian)),
+ Dst: dst,
+ Constant: imm,
+ }
+}
+
+// Op returns the OpCode for an ALU operation with a given source.
+func (op ALUOp) Op(source Source) OpCode {
+ return OpCode(ALU64Class).SetALUOp(op).SetSource(source)
+}
+
+// Reg emits `dst (op) src`.
+func (op ALUOp) Reg(dst, src Register) Instruction {
+ return Instruction{
+ OpCode: op.Op(RegSource),
+ Dst: dst,
+ Src: src,
+ }
+}
+
+// Imm emits `dst (op) value`.
+func (op ALUOp) Imm(dst Register, value int32) Instruction {
+ return Instruction{
+ OpCode: op.Op(ImmSource),
+ Dst: dst,
+ Constant: int64(value),
+ }
+}
+
+// Op32 returns the OpCode for a 32-bit ALU operation with a given source.
+func (op ALUOp) Op32(source Source) OpCode {
+ return OpCode(ALUClass).SetALUOp(op).SetSource(source)
+}
+
+// Reg32 emits `dst (op) src`, zeroing the upper 32 bit of dst.
+func (op ALUOp) Reg32(dst, src Register) Instruction {
+ return Instruction{
+ OpCode: op.Op32(RegSource),
+ Dst: dst,
+ Src: src,
+ }
+}
+
+// Imm32 emits `dst (op) value`, zeroing the upper 32 bit of dst.
+func (op ALUOp) Imm32(dst Register, value int32) Instruction {
+ return Instruction{
+ OpCode: op.Op32(ImmSource),
+ Dst: dst,
+ Constant: int64(value),
+ }
+}
diff --git a/vendor/github.com/cilium/ebpf/asm/alu_string.go b/vendor/github.com/cilium/ebpf/asm/alu_string.go
new file mode 100644
index 000000000..72d3fe629
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/asm/alu_string.go
@@ -0,0 +1,107 @@
+// Code generated by "stringer -output alu_string.go -type=Source,Endianness,ALUOp"; DO NOT EDIT.
+
+package asm
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[InvalidSource-255]
+ _ = x[ImmSource-0]
+ _ = x[RegSource-8]
+}
+
+const (
+ _Source_name_0 = "ImmSource"
+ _Source_name_1 = "RegSource"
+ _Source_name_2 = "InvalidSource"
+)
+
+func (i Source) String() string {
+ switch {
+ case i == 0:
+ return _Source_name_0
+ case i == 8:
+ return _Source_name_1
+ case i == 255:
+ return _Source_name_2
+ default:
+ return "Source(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+}
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[InvalidEndian-255]
+ _ = x[LE-0]
+ _ = x[BE-8]
+}
+
+const (
+ _Endianness_name_0 = "LE"
+ _Endianness_name_1 = "BE"
+ _Endianness_name_2 = "InvalidEndian"
+)
+
+func (i Endianness) String() string {
+ switch {
+ case i == 0:
+ return _Endianness_name_0
+ case i == 8:
+ return _Endianness_name_1
+ case i == 255:
+ return _Endianness_name_2
+ default:
+ return "Endianness(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+}
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[InvalidALUOp-255]
+ _ = x[Add-0]
+ _ = x[Sub-16]
+ _ = x[Mul-32]
+ _ = x[Div-48]
+ _ = x[Or-64]
+ _ = x[And-80]
+ _ = x[LSh-96]
+ _ = x[RSh-112]
+ _ = x[Neg-128]
+ _ = x[Mod-144]
+ _ = x[Xor-160]
+ _ = x[Mov-176]
+ _ = x[ArSh-192]
+ _ = x[Swap-208]
+}
+
+const _ALUOp_name = "AddSubMulDivOrAndLShRShNegModXorMovArShSwapInvalidALUOp"
+
+var _ALUOp_map = map[ALUOp]string{
+ 0: _ALUOp_name[0:3],
+ 16: _ALUOp_name[3:6],
+ 32: _ALUOp_name[6:9],
+ 48: _ALUOp_name[9:12],
+ 64: _ALUOp_name[12:14],
+ 80: _ALUOp_name[14:17],
+ 96: _ALUOp_name[17:20],
+ 112: _ALUOp_name[20:23],
+ 128: _ALUOp_name[23:26],
+ 144: _ALUOp_name[26:29],
+ 160: _ALUOp_name[29:32],
+ 176: _ALUOp_name[32:35],
+ 192: _ALUOp_name[35:39],
+ 208: _ALUOp_name[39:43],
+ 255: _ALUOp_name[43:55],
+}
+
+func (i ALUOp) String() string {
+ if str, ok := _ALUOp_map[i]; ok {
+ return str
+ }
+ return "ALUOp(" + strconv.FormatInt(int64(i), 10) + ")"
+}
diff --git a/vendor/github.com/cilium/ebpf/asm/doc.go b/vendor/github.com/cilium/ebpf/asm/doc.go
new file mode 100644
index 000000000..7031bdc27
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/asm/doc.go
@@ -0,0 +1,2 @@
+// Package asm is an assembler for eBPF bytecode.
+package asm
diff --git a/vendor/github.com/cilium/ebpf/asm/func.go b/vendor/github.com/cilium/ebpf/asm/func.go
new file mode 100644
index 000000000..84a40b227
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/asm/func.go
@@ -0,0 +1,250 @@
+package asm
+
+//go:generate go run golang.org/x/tools/cmd/stringer@latest -output func_string.go -type=BuiltinFunc
+
+// BuiltinFunc is a built-in eBPF function.
+type BuiltinFunc int32
+
+func (_ BuiltinFunc) Max() BuiltinFunc {
+ return maxBuiltinFunc - 1
+}
+
+// eBPF built-in functions
+//
+// You can regenerate this list using the following gawk script:
+//
+// /FN\(.+\),/ {
+// match($1, /\(([a-z_0-9]+),/, r)
+// split(r[1], p, "_")
+// printf "Fn"
+// for (i in p) {
+// printf "%s%s", toupper(substr(p[i], 1, 1)), substr(p[i], 2)
+// }
+// print ""
+// }
+//
+// The script expects include/uapi/linux/bpf.h as its input.
+const (
+ FnUnspec BuiltinFunc = iota
+ FnMapLookupElem
+ FnMapUpdateElem
+ FnMapDeleteElem
+ FnProbeRead
+ FnKtimeGetNs
+ FnTracePrintk
+ FnGetPrandomU32
+ FnGetSmpProcessorId
+ FnSkbStoreBytes
+ FnL3CsumReplace
+ FnL4CsumReplace
+ FnTailCall
+ FnCloneRedirect
+ FnGetCurrentPidTgid
+ FnGetCurrentUidGid
+ FnGetCurrentComm
+ FnGetCgroupClassid
+ FnSkbVlanPush
+ FnSkbVlanPop
+ FnSkbGetTunnelKey
+ FnSkbSetTunnelKey
+ FnPerfEventRead
+ FnRedirect
+ FnGetRouteRealm
+ FnPerfEventOutput
+ FnSkbLoadBytes
+ FnGetStackid
+ FnCsumDiff
+ FnSkbGetTunnelOpt
+ FnSkbSetTunnelOpt
+ FnSkbChangeProto
+ FnSkbChangeType
+ FnSkbUnderCgroup
+ FnGetHashRecalc
+ FnGetCurrentTask
+ FnProbeWriteUser
+ FnCurrentTaskUnderCgroup
+ FnSkbChangeTail
+ FnSkbPullData
+ FnCsumUpdate
+ FnSetHashInvalid
+ FnGetNumaNodeId
+ FnSkbChangeHead
+ FnXdpAdjustHead
+ FnProbeReadStr
+ FnGetSocketCookie
+ FnGetSocketUid
+ FnSetHash
+ FnSetsockopt
+ FnSkbAdjustRoom
+ FnRedirectMap
+ FnSkRedirectMap
+ FnSockMapUpdate
+ FnXdpAdjustMeta
+ FnPerfEventReadValue
+ FnPerfProgReadValue
+ FnGetsockopt
+ FnOverrideReturn
+ FnSockOpsCbFlagsSet
+ FnMsgRedirectMap
+ FnMsgApplyBytes
+ FnMsgCorkBytes
+ FnMsgPullData
+ FnBind
+ FnXdpAdjustTail
+ FnSkbGetXfrmState
+ FnGetStack
+ FnSkbLoadBytesRelative
+ FnFibLookup
+ FnSockHashUpdate
+ FnMsgRedirectHash
+ FnSkRedirectHash
+ FnLwtPushEncap
+ FnLwtSeg6StoreBytes
+ FnLwtSeg6AdjustSrh
+ FnLwtSeg6Action
+ FnRcRepeat
+ FnRcKeydown
+ FnSkbCgroupId
+ FnGetCurrentCgroupId
+ FnGetLocalStorage
+ FnSkSelectReuseport
+ FnSkbAncestorCgroupId
+ FnSkLookupTcp
+ FnSkLookupUdp
+ FnSkRelease
+ FnMapPushElem
+ FnMapPopElem
+ FnMapPeekElem
+ FnMsgPushData
+ FnMsgPopData
+ FnRcPointerRel
+ FnSpinLock
+ FnSpinUnlock
+ FnSkFullsock
+ FnTcpSock
+ FnSkbEcnSetCe
+ FnGetListenerSock
+ FnSkcLookupTcp
+ FnTcpCheckSyncookie
+ FnSysctlGetName
+ FnSysctlGetCurrentValue
+ FnSysctlGetNewValue
+ FnSysctlSetNewValue
+ FnStrtol
+ FnStrtoul
+ FnSkStorageGet
+ FnSkStorageDelete
+ FnSendSignal
+ FnTcpGenSyncookie
+ FnSkbOutput
+ FnProbeReadUser
+ FnProbeReadKernel
+ FnProbeReadUserStr
+ FnProbeReadKernelStr
+ FnTcpSendAck
+ FnSendSignalThread
+ FnJiffies64
+ FnReadBranchRecords
+ FnGetNsCurrentPidTgid
+ FnXdpOutput
+ FnGetNetnsCookie
+ FnGetCurrentAncestorCgroupId
+ FnSkAssign
+ FnKtimeGetBootNs
+ FnSeqPrintf
+ FnSeqWrite
+ FnSkCgroupId
+ FnSkAncestorCgroupId
+ FnRingbufOutput
+ FnRingbufReserve
+ FnRingbufSubmit
+ FnRingbufDiscard
+ FnRingbufQuery
+ FnCsumLevel
+ FnSkcToTcp6Sock
+ FnSkcToTcpSock
+ FnSkcToTcpTimewaitSock
+ FnSkcToTcpRequestSock
+ FnSkcToUdp6Sock
+ FnGetTaskStack
+ FnLoadHdrOpt
+ FnStoreHdrOpt
+ FnReserveHdrOpt
+ FnInodeStorageGet
+ FnInodeStorageDelete
+ FnDPath
+ FnCopyFromUser
+ FnSnprintfBtf
+ FnSeqPrintfBtf
+ FnSkbCgroupClassid
+ FnRedirectNeigh
+ FnPerCpuPtr
+ FnThisCpuPtr
+ FnRedirectPeer
+ FnTaskStorageGet
+ FnTaskStorageDelete
+ FnGetCurrentTaskBtf
+ FnBprmOptsSet
+ FnKtimeGetCoarseNs
+ FnImaInodeHash
+ FnSockFromFile
+ FnCheckMtu
+ FnForEachMapElem
+ FnSnprintf
+ FnSysBpf
+ FnBtfFindByNameKind
+ FnSysClose
+ FnTimerInit
+ FnTimerSetCallback
+ FnTimerStart
+ FnTimerCancel
+ FnGetFuncIp
+ FnGetAttachCookie
+ FnTaskPtRegs
+ FnGetBranchSnapshot
+ FnTraceVprintk
+ FnSkcToUnixSock
+ FnKallsymsLookupName
+ FnFindVma
+ FnLoop
+ FnStrncmp
+ FnGetFuncArg
+ FnGetFuncRet
+ FnGetFuncArgCnt
+ FnGetRetval
+ FnSetRetval
+ FnXdpGetBuffLen
+ FnXdpLoadBytes
+ FnXdpStoreBytes
+ FnCopyFromUserTask
+ FnSkbSetTstamp
+ FnImaFileHash
+ FnKptrXchg
+ FnMapLookupPercpuElem
+ FnSkcToMptcpSock
+ FnDynptrFromMem
+ FnRingbufReserveDynptr
+ FnRingbufSubmitDynptr
+ FnRingbufDiscardDynptr
+ FnDynptrRead
+ FnDynptrWrite
+ FnDynptrData
+ FnTcpRawGenSyncookieIpv4
+ FnTcpRawGenSyncookieIpv6
+ FnTcpRawCheckSyncookieIpv4
+ FnTcpRawCheckSyncookieIpv6
+ FnKtimeGetTaiNs
+ FnUserRingbufDrain
+ FnCgrpStorageGet
+ FnCgrpStorageDelete
+
+ maxBuiltinFunc
+)
+
+// Call emits a function call.
+func (fn BuiltinFunc) Call() Instruction {
+ return Instruction{
+ OpCode: OpCode(JumpClass).SetJumpOp(Call),
+ Constant: int64(fn),
+ }
+}
diff --git a/vendor/github.com/cilium/ebpf/asm/func_string.go b/vendor/github.com/cilium/ebpf/asm/func_string.go
new file mode 100644
index 000000000..47150bc4f
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/asm/func_string.go
@@ -0,0 +1,235 @@
+// Code generated by "stringer -output func_string.go -type=BuiltinFunc"; DO NOT EDIT.
+
+package asm
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[FnUnspec-0]
+ _ = x[FnMapLookupElem-1]
+ _ = x[FnMapUpdateElem-2]
+ _ = x[FnMapDeleteElem-3]
+ _ = x[FnProbeRead-4]
+ _ = x[FnKtimeGetNs-5]
+ _ = x[FnTracePrintk-6]
+ _ = x[FnGetPrandomU32-7]
+ _ = x[FnGetSmpProcessorId-8]
+ _ = x[FnSkbStoreBytes-9]
+ _ = x[FnL3CsumReplace-10]
+ _ = x[FnL4CsumReplace-11]
+ _ = x[FnTailCall-12]
+ _ = x[FnCloneRedirect-13]
+ _ = x[FnGetCurrentPidTgid-14]
+ _ = x[FnGetCurrentUidGid-15]
+ _ = x[FnGetCurrentComm-16]
+ _ = x[FnGetCgroupClassid-17]
+ _ = x[FnSkbVlanPush-18]
+ _ = x[FnSkbVlanPop-19]
+ _ = x[FnSkbGetTunnelKey-20]
+ _ = x[FnSkbSetTunnelKey-21]
+ _ = x[FnPerfEventRead-22]
+ _ = x[FnRedirect-23]
+ _ = x[FnGetRouteRealm-24]
+ _ = x[FnPerfEventOutput-25]
+ _ = x[FnSkbLoadBytes-26]
+ _ = x[FnGetStackid-27]
+ _ = x[FnCsumDiff-28]
+ _ = x[FnSkbGetTunnelOpt-29]
+ _ = x[FnSkbSetTunnelOpt-30]
+ _ = x[FnSkbChangeProto-31]
+ _ = x[FnSkbChangeType-32]
+ _ = x[FnSkbUnderCgroup-33]
+ _ = x[FnGetHashRecalc-34]
+ _ = x[FnGetCurrentTask-35]
+ _ = x[FnProbeWriteUser-36]
+ _ = x[FnCurrentTaskUnderCgroup-37]
+ _ = x[FnSkbChangeTail-38]
+ _ = x[FnSkbPullData-39]
+ _ = x[FnCsumUpdate-40]
+ _ = x[FnSetHashInvalid-41]
+ _ = x[FnGetNumaNodeId-42]
+ _ = x[FnSkbChangeHead-43]
+ _ = x[FnXdpAdjustHead-44]
+ _ = x[FnProbeReadStr-45]
+ _ = x[FnGetSocketCookie-46]
+ _ = x[FnGetSocketUid-47]
+ _ = x[FnSetHash-48]
+ _ = x[FnSetsockopt-49]
+ _ = x[FnSkbAdjustRoom-50]
+ _ = x[FnRedirectMap-51]
+ _ = x[FnSkRedirectMap-52]
+ _ = x[FnSockMapUpdate-53]
+ _ = x[FnXdpAdjustMeta-54]
+ _ = x[FnPerfEventReadValue-55]
+ _ = x[FnPerfProgReadValue-56]
+ _ = x[FnGetsockopt-57]
+ _ = x[FnOverrideReturn-58]
+ _ = x[FnSockOpsCbFlagsSet-59]
+ _ = x[FnMsgRedirectMap-60]
+ _ = x[FnMsgApplyBytes-61]
+ _ = x[FnMsgCorkBytes-62]
+ _ = x[FnMsgPullData-63]
+ _ = x[FnBind-64]
+ _ = x[FnXdpAdjustTail-65]
+ _ = x[FnSkbGetXfrmState-66]
+ _ = x[FnGetStack-67]
+ _ = x[FnSkbLoadBytesRelative-68]
+ _ = x[FnFibLookup-69]
+ _ = x[FnSockHashUpdate-70]
+ _ = x[FnMsgRedirectHash-71]
+ _ = x[FnSkRedirectHash-72]
+ _ = x[FnLwtPushEncap-73]
+ _ = x[FnLwtSeg6StoreBytes-74]
+ _ = x[FnLwtSeg6AdjustSrh-75]
+ _ = x[FnLwtSeg6Action-76]
+ _ = x[FnRcRepeat-77]
+ _ = x[FnRcKeydown-78]
+ _ = x[FnSkbCgroupId-79]
+ _ = x[FnGetCurrentCgroupId-80]
+ _ = x[FnGetLocalStorage-81]
+ _ = x[FnSkSelectReuseport-82]
+ _ = x[FnSkbAncestorCgroupId-83]
+ _ = x[FnSkLookupTcp-84]
+ _ = x[FnSkLookupUdp-85]
+ _ = x[FnSkRelease-86]
+ _ = x[FnMapPushElem-87]
+ _ = x[FnMapPopElem-88]
+ _ = x[FnMapPeekElem-89]
+ _ = x[FnMsgPushData-90]
+ _ = x[FnMsgPopData-91]
+ _ = x[FnRcPointerRel-92]
+ _ = x[FnSpinLock-93]
+ _ = x[FnSpinUnlock-94]
+ _ = x[FnSkFullsock-95]
+ _ = x[FnTcpSock-96]
+ _ = x[FnSkbEcnSetCe-97]
+ _ = x[FnGetListenerSock-98]
+ _ = x[FnSkcLookupTcp-99]
+ _ = x[FnTcpCheckSyncookie-100]
+ _ = x[FnSysctlGetName-101]
+ _ = x[FnSysctlGetCurrentValue-102]
+ _ = x[FnSysctlGetNewValue-103]
+ _ = x[FnSysctlSetNewValue-104]
+ _ = x[FnStrtol-105]
+ _ = x[FnStrtoul-106]
+ _ = x[FnSkStorageGet-107]
+ _ = x[FnSkStorageDelete-108]
+ _ = x[FnSendSignal-109]
+ _ = x[FnTcpGenSyncookie-110]
+ _ = x[FnSkbOutput-111]
+ _ = x[FnProbeReadUser-112]
+ _ = x[FnProbeReadKernel-113]
+ _ = x[FnProbeReadUserStr-114]
+ _ = x[FnProbeReadKernelStr-115]
+ _ = x[FnTcpSendAck-116]
+ _ = x[FnSendSignalThread-117]
+ _ = x[FnJiffies64-118]
+ _ = x[FnReadBranchRecords-119]
+ _ = x[FnGetNsCurrentPidTgid-120]
+ _ = x[FnXdpOutput-121]
+ _ = x[FnGetNetnsCookie-122]
+ _ = x[FnGetCurrentAncestorCgroupId-123]
+ _ = x[FnSkAssign-124]
+ _ = x[FnKtimeGetBootNs-125]
+ _ = x[FnSeqPrintf-126]
+ _ = x[FnSeqWrite-127]
+ _ = x[FnSkCgroupId-128]
+ _ = x[FnSkAncestorCgroupId-129]
+ _ = x[FnRingbufOutput-130]
+ _ = x[FnRingbufReserve-131]
+ _ = x[FnRingbufSubmit-132]
+ _ = x[FnRingbufDiscard-133]
+ _ = x[FnRingbufQuery-134]
+ _ = x[FnCsumLevel-135]
+ _ = x[FnSkcToTcp6Sock-136]
+ _ = x[FnSkcToTcpSock-137]
+ _ = x[FnSkcToTcpTimewaitSock-138]
+ _ = x[FnSkcToTcpRequestSock-139]
+ _ = x[FnSkcToUdp6Sock-140]
+ _ = x[FnGetTaskStack-141]
+ _ = x[FnLoadHdrOpt-142]
+ _ = x[FnStoreHdrOpt-143]
+ _ = x[FnReserveHdrOpt-144]
+ _ = x[FnInodeStorageGet-145]
+ _ = x[FnInodeStorageDelete-146]
+ _ = x[FnDPath-147]
+ _ = x[FnCopyFromUser-148]
+ _ = x[FnSnprintfBtf-149]
+ _ = x[FnSeqPrintfBtf-150]
+ _ = x[FnSkbCgroupClassid-151]
+ _ = x[FnRedirectNeigh-152]
+ _ = x[FnPerCpuPtr-153]
+ _ = x[FnThisCpuPtr-154]
+ _ = x[FnRedirectPeer-155]
+ _ = x[FnTaskStorageGet-156]
+ _ = x[FnTaskStorageDelete-157]
+ _ = x[FnGetCurrentTaskBtf-158]
+ _ = x[FnBprmOptsSet-159]
+ _ = x[FnKtimeGetCoarseNs-160]
+ _ = x[FnImaInodeHash-161]
+ _ = x[FnSockFromFile-162]
+ _ = x[FnCheckMtu-163]
+ _ = x[FnForEachMapElem-164]
+ _ = x[FnSnprintf-165]
+ _ = x[FnSysBpf-166]
+ _ = x[FnBtfFindByNameKind-167]
+ _ = x[FnSysClose-168]
+ _ = x[FnTimerInit-169]
+ _ = x[FnTimerSetCallback-170]
+ _ = x[FnTimerStart-171]
+ _ = x[FnTimerCancel-172]
+ _ = x[FnGetFuncIp-173]
+ _ = x[FnGetAttachCookie-174]
+ _ = x[FnTaskPtRegs-175]
+ _ = x[FnGetBranchSnapshot-176]
+ _ = x[FnTraceVprintk-177]
+ _ = x[FnSkcToUnixSock-178]
+ _ = x[FnKallsymsLookupName-179]
+ _ = x[FnFindVma-180]
+ _ = x[FnLoop-181]
+ _ = x[FnStrncmp-182]
+ _ = x[FnGetFuncArg-183]
+ _ = x[FnGetFuncRet-184]
+ _ = x[FnGetFuncArgCnt-185]
+ _ = x[FnGetRetval-186]
+ _ = x[FnSetRetval-187]
+ _ = x[FnXdpGetBuffLen-188]
+ _ = x[FnXdpLoadBytes-189]
+ _ = x[FnXdpStoreBytes-190]
+ _ = x[FnCopyFromUserTask-191]
+ _ = x[FnSkbSetTstamp-192]
+ _ = x[FnImaFileHash-193]
+ _ = x[FnKptrXchg-194]
+ _ = x[FnMapLookupPercpuElem-195]
+ _ = x[FnSkcToMptcpSock-196]
+ _ = x[FnDynptrFromMem-197]
+ _ = x[FnRingbufReserveDynptr-198]
+ _ = x[FnRingbufSubmitDynptr-199]
+ _ = x[FnRingbufDiscardDynptr-200]
+ _ = x[FnDynptrRead-201]
+ _ = x[FnDynptrWrite-202]
+ _ = x[FnDynptrData-203]
+ _ = x[FnTcpRawGenSyncookieIpv4-204]
+ _ = x[FnTcpRawGenSyncookieIpv6-205]
+ _ = x[FnTcpRawCheckSyncookieIpv4-206]
+ _ = x[FnTcpRawCheckSyncookieIpv6-207]
+ _ = x[FnKtimeGetTaiNs-208]
+ _ = x[FnUserRingbufDrain-209]
+ _ = x[FnCgrpStorageGet-210]
+ _ = x[FnCgrpStorageDelete-211]
+ _ = x[maxBuiltinFunc-212]
+}
+
+const _BuiltinFunc_name = "FnUnspecFnMapLookupElemFnMapUpdateElemFnMapDeleteElemFnProbeReadFnKtimeGetNsFnTracePrintkFnGetPrandomU32FnGetSmpProcessorIdFnSkbStoreBytesFnL3CsumReplaceFnL4CsumReplaceFnTailCallFnCloneRedirectFnGetCurrentPidTgidFnGetCurrentUidGidFnGetCurrentCommFnGetCgroupClassidFnSkbVlanPushFnSkbVlanPopFnSkbGetTunnelKeyFnSkbSetTunnelKeyFnPerfEventReadFnRedirectFnGetRouteRealmFnPerfEventOutputFnSkbLoadBytesFnGetStackidFnCsumDiffFnSkbGetTunnelOptFnSkbSetTunnelOptFnSkbChangeProtoFnSkbChangeTypeFnSkbUnderCgroupFnGetHashRecalcFnGetCurrentTaskFnProbeWriteUserFnCurrentTaskUnderCgroupFnSkbChangeTailFnSkbPullDataFnCsumUpdateFnSetHashInvalidFnGetNumaNodeIdFnSkbChangeHeadFnXdpAdjustHeadFnProbeReadStrFnGetSocketCookieFnGetSocketUidFnSetHashFnSetsockoptFnSkbAdjustRoomFnRedirectMapFnSkRedirectMapFnSockMapUpdateFnXdpAdjustMetaFnPerfEventReadValueFnPerfProgReadValueFnGetsockoptFnOverrideReturnFnSockOpsCbFlagsSetFnMsgRedirectMapFnMsgApplyBytesFnMsgCorkBytesFnMsgPullDataFnBindFnXdpAdjustTailFnSkbGetXfrmStateFnGetStackFnSkbLoadBytesRelativeFnFibLookupFnSockHashUpdateFnMsgRedirectHashFnSkRedirectHashFnLwtPushEncapFnLwtSeg6StoreBytesFnLwtSeg6AdjustSrhFnLwtSeg6ActionFnRcRepeatFnRcKeydownFnSkbCgroupIdFnGetCurrentCgroupIdFnGetLocalStorageFnSkSelectReuseportFnSkbAncestorCgroupIdFnSkLookupTcpFnSkLookupUdpFnSkReleaseFnMapPushElemFnMapPopElemFnMapPeekElemFnMsgPushDataFnMsgPopDataFnRcPointerRelFnSpinLockFnSpinUnlockFnSkFullsockFnTcpSockFnSkbEcnSetCeFnGetListenerSockFnSkcLookupTcpFnTcpCheckSyncookieFnSysctlGetNameFnSysctlGetCurrentValueFnSysctlGetNewValueFnSysctlSetNewValueFnStrtolFnStrtoulFnSkStorageGetFnSkStorageDeleteFnSendSignalFnTcpGenSyncookieFnSkbOutputFnProbeReadUserFnProbeReadKernelFnProbeReadUserStrFnProbeReadKernelStrFnTcpSendAckFnSendSignalThreadFnJiffies64FnReadBranchRecordsFnGetNsCurrentPidTgidFnXdpOutputFnGetNetnsCookieFnGetCurrentAncestorCgroupIdFnSkAssignFnKtimeGetBootNsFnSeqPrintfFnSeqWriteFnSkCgroupIdFnSkAncestorCgroupIdFnRingbufOutputFnRingbufReserveFnRingbufSubm
itFnRingbufDiscardFnRingbufQueryFnCsumLevelFnSkcToTcp6SockFnSkcToTcpSockFnSkcToTcpTimewaitSockFnSkcToTcpRequestSockFnSkcToUdp6SockFnGetTaskStackFnLoadHdrOptFnStoreHdrOptFnReserveHdrOptFnInodeStorageGetFnInodeStorageDeleteFnDPathFnCopyFromUserFnSnprintfBtfFnSeqPrintfBtfFnSkbCgroupClassidFnRedirectNeighFnPerCpuPtrFnThisCpuPtrFnRedirectPeerFnTaskStorageGetFnTaskStorageDeleteFnGetCurrentTaskBtfFnBprmOptsSetFnKtimeGetCoarseNsFnImaInodeHashFnSockFromFileFnCheckMtuFnForEachMapElemFnSnprintfFnSysBpfFnBtfFindByNameKindFnSysCloseFnTimerInitFnTimerSetCallbackFnTimerStartFnTimerCancelFnGetFuncIpFnGetAttachCookieFnTaskPtRegsFnGetBranchSnapshotFnTraceVprintkFnSkcToUnixSockFnKallsymsLookupNameFnFindVmaFnLoopFnStrncmpFnGetFuncArgFnGetFuncRetFnGetFuncArgCntFnGetRetvalFnSetRetvalFnXdpGetBuffLenFnXdpLoadBytesFnXdpStoreBytesFnCopyFromUserTaskFnSkbSetTstampFnImaFileHashFnKptrXchgFnMapLookupPercpuElemFnSkcToMptcpSockFnDynptrFromMemFnRingbufReserveDynptrFnRingbufSubmitDynptrFnRingbufDiscardDynptrFnDynptrReadFnDynptrWriteFnDynptrDataFnTcpRawGenSyncookieIpv4FnTcpRawGenSyncookieIpv6FnTcpRawCheckSyncookieIpv4FnTcpRawCheckSyncookieIpv6FnKtimeGetTaiNsFnUserRingbufDrainFnCgrpStorageGetFnCgrpStorageDeletemaxBuiltinFunc"
+
+var _BuiltinFunc_index = [...]uint16{0, 8, 23, 38, 53, 64, 76, 89, 104, 123, 138, 153, 168, 178, 193, 212, 230, 246, 264, 277, 289, 306, 323, 338, 348, 363, 380, 394, 406, 416, 433, 450, 466, 481, 497, 512, 528, 544, 568, 583, 596, 608, 624, 639, 654, 669, 683, 700, 714, 723, 735, 750, 763, 778, 793, 808, 828, 847, 859, 875, 894, 910, 925, 939, 952, 958, 973, 990, 1000, 1022, 1033, 1049, 1066, 1082, 1096, 1115, 1133, 1148, 1158, 1169, 1182, 1202, 1219, 1238, 1259, 1272, 1285, 1296, 1309, 1321, 1334, 1347, 1359, 1373, 1383, 1395, 1407, 1416, 1429, 1446, 1460, 1479, 1494, 1517, 1536, 1555, 1563, 1572, 1586, 1603, 1615, 1632, 1643, 1658, 1675, 1693, 1713, 1725, 1743, 1754, 1773, 1794, 1805, 1821, 1849, 1859, 1875, 1886, 1896, 1908, 1928, 1943, 1959, 1974, 1990, 2004, 2015, 2030, 2044, 2066, 2087, 2102, 2116, 2128, 2141, 2156, 2173, 2193, 2200, 2214, 2227, 2241, 2259, 2274, 2285, 2297, 2311, 2327, 2346, 2365, 2378, 2396, 2410, 2424, 2434, 2450, 2460, 2468, 2487, 2497, 2508, 2526, 2538, 2551, 2562, 2579, 2591, 2610, 2624, 2639, 2659, 2668, 2674, 2683, 2695, 2707, 2722, 2733, 2744, 2759, 2773, 2788, 2806, 2820, 2833, 2843, 2864, 2880, 2895, 2917, 2938, 2960, 2972, 2985, 2997, 3021, 3045, 3071, 3097, 3112, 3130, 3146, 3165, 3179}
+
+func (i BuiltinFunc) String() string {
+ if i < 0 || i >= BuiltinFunc(len(_BuiltinFunc_index)-1) {
+ return "BuiltinFunc(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _BuiltinFunc_name[_BuiltinFunc_index[i]:_BuiltinFunc_index[i+1]]
+}
diff --git a/vendor/github.com/cilium/ebpf/asm/instruction.go b/vendor/github.com/cilium/ebpf/asm/instruction.go
new file mode 100644
index 000000000..ef01eaa35
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/asm/instruction.go
@@ -0,0 +1,877 @@
+package asm
+
+import (
+ "crypto/sha1"
+ "encoding/binary"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "sort"
+ "strings"
+
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// InstructionSize is the size of a BPF instruction in bytes
+const InstructionSize = 8
+
+// RawInstructionOffset is an offset in units of raw BPF instructions.
+type RawInstructionOffset uint64
+
+var ErrUnreferencedSymbol = errors.New("unreferenced symbol")
+var ErrUnsatisfiedMapReference = errors.New("unsatisfied map reference")
+var ErrUnsatisfiedProgramReference = errors.New("unsatisfied program reference")
+
+// Bytes returns the offset of an instruction in bytes.
+func (rio RawInstructionOffset) Bytes() uint64 {
+ return uint64(rio) * InstructionSize
+}
+
+// Instruction is a single eBPF instruction.
+type Instruction struct {
+ OpCode OpCode
+ Dst Register
+ Src Register
+ Offset int16
+ Constant int64
+
+ // Metadata contains optional metadata about this instruction.
+ Metadata Metadata
+}
+
+// Unmarshal decodes a BPF instruction.
+func (ins *Instruction) Unmarshal(r io.Reader, bo binary.ByteOrder) (uint64, error) {
+ data := make([]byte, InstructionSize)
+ if _, err := io.ReadFull(r, data); err != nil {
+ return 0, err
+ }
+
+ ins.OpCode = OpCode(data[0])
+
+ regs := data[1]
+ switch bo {
+ case binary.LittleEndian:
+ ins.Dst, ins.Src = Register(regs&0xF), Register(regs>>4)
+ case binary.BigEndian:
+ ins.Dst, ins.Src = Register(regs>>4), Register(regs&0xf)
+ }
+
+ ins.Offset = int16(bo.Uint16(data[2:4]))
+ // Convert to int32 before widening to int64
+ // to ensure the signed bit is carried over.
+ ins.Constant = int64(int32(bo.Uint32(data[4:8])))
+
+ if !ins.OpCode.IsDWordLoad() {
+ return InstructionSize, nil
+ }
+
+ // Pull another instruction from the stream to retrieve the second
+ // half of the 64-bit immediate value.
+ if _, err := io.ReadFull(r, data); err != nil {
+ // No Wrap, to avoid io.EOF clash
+ return 0, errors.New("64bit immediate is missing second half")
+ }
+
+ // Require that all fields other than the value are zero.
+ if bo.Uint32(data[0:4]) != 0 {
+ return 0, errors.New("64bit immediate has non-zero fields")
+ }
+
+ cons1 := uint32(ins.Constant)
+ cons2 := int32(bo.Uint32(data[4:8]))
+ ins.Constant = int64(cons2)<<32 | int64(cons1)
+
+ return 2 * InstructionSize, nil
+}
+
+// Marshal encodes a BPF instruction.
+func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error) {
+ if ins.OpCode == InvalidOpCode {
+ return 0, errors.New("invalid opcode")
+ }
+
+ isDWordLoad := ins.OpCode.IsDWordLoad()
+
+ cons := int32(ins.Constant)
+ if isDWordLoad {
+ // Encode least significant 32bit first for 64bit operations.
+ cons = int32(uint32(ins.Constant))
+ }
+
+ regs, err := newBPFRegisters(ins.Dst, ins.Src, bo)
+ if err != nil {
+ return 0, fmt.Errorf("can't marshal registers: %s", err)
+ }
+
+ data := make([]byte, InstructionSize)
+ data[0] = byte(ins.OpCode)
+ data[1] = byte(regs)
+ bo.PutUint16(data[2:4], uint16(ins.Offset))
+ bo.PutUint32(data[4:8], uint32(cons))
+ if _, err := w.Write(data); err != nil {
+ return 0, err
+ }
+
+ if !isDWordLoad {
+ return InstructionSize, nil
+ }
+
+ // The first half of the second part of a double-wide instruction
+ // must be zero. The second half carries the value.
+ bo.PutUint32(data[0:4], 0)
+ bo.PutUint32(data[4:8], uint32(ins.Constant>>32))
+ if _, err := w.Write(data); err != nil {
+ return 0, err
+ }
+
+ return 2 * InstructionSize, nil
+}
+
+// AssociateMap associates a Map with this Instruction.
+//
+// Implicitly clears the Instruction's Reference field.
+//
+// Returns an error if the Instruction is not a map load.
+func (ins *Instruction) AssociateMap(m FDer) error {
+ if !ins.IsLoadFromMap() {
+ return errors.New("not a load from a map")
+ }
+
+ ins.Metadata.Set(referenceMeta{}, nil)
+ ins.Metadata.Set(mapMeta{}, m)
+
+ return nil
+}
+
+// RewriteMapPtr changes an instruction to use a new map fd.
+//
+// Returns an error if the instruction doesn't load a map.
+//
+// Deprecated: use AssociateMap instead. If you cannot provide a Map,
+// wrap an fd in a type implementing FDer.
+func (ins *Instruction) RewriteMapPtr(fd int) error {
+ if !ins.IsLoadFromMap() {
+ return errors.New("not a load from a map")
+ }
+
+ ins.encodeMapFD(fd)
+
+ return nil
+}
+
+func (ins *Instruction) encodeMapFD(fd int) {
+ // Preserve the offset value for direct map loads.
+ offset := uint64(ins.Constant) & (math.MaxUint32 << 32)
+ rawFd := uint64(uint32(fd))
+ ins.Constant = int64(offset | rawFd)
+}
+
+// MapPtr returns the map fd for this instruction.
+//
+// The result is undefined if the instruction is not a load from a map,
+// see IsLoadFromMap.
+//
+// Deprecated: use Map() instead.
+func (ins *Instruction) MapPtr() int {
+ // If there is a map associated with the instruction, return its FD.
+ if fd := ins.Metadata.Get(mapMeta{}); fd != nil {
+ return fd.(FDer).FD()
+ }
+
+ // Fall back to the fd stored in the Constant field
+ return ins.mapFd()
+}
+
+// mapFd returns the map file descriptor stored in the 32 least significant
+// bits of ins' Constant field.
+func (ins *Instruction) mapFd() int {
+ return int(int32(ins.Constant))
+}
+
+// RewriteMapOffset changes the offset of a direct load from a map.
+//
+// Returns an error if the instruction is not a direct load.
+func (ins *Instruction) RewriteMapOffset(offset uint32) error {
+ if !ins.OpCode.IsDWordLoad() {
+ return fmt.Errorf("%s is not a 64 bit load", ins.OpCode)
+ }
+
+ if ins.Src != PseudoMapValue {
+ return errors.New("not a direct load from a map")
+ }
+
+ fd := uint64(ins.Constant) & math.MaxUint32
+ ins.Constant = int64(uint64(offset)<<32 | fd)
+ return nil
+}
+
+func (ins *Instruction) mapOffset() uint32 {
+ return uint32(uint64(ins.Constant) >> 32)
+}
+
+// IsLoadFromMap returns true if the instruction loads from a map.
+//
+// This covers both loading the map pointer and direct map value loads.
+func (ins *Instruction) IsLoadFromMap() bool {
+ return ins.OpCode == LoadImmOp(DWord) && (ins.Src == PseudoMapFD || ins.Src == PseudoMapValue)
+}
+
+// IsFunctionCall returns true if the instruction calls another BPF function.
+//
+// This is not the same thing as a BPF helper call.
+func (ins *Instruction) IsFunctionCall() bool {
+ return ins.OpCode.JumpOp() == Call && ins.Src == PseudoCall
+}
+
+// IsKfuncCall returns true if the instruction calls a kfunc.
+//
+// This is not the same thing as a BPF helper call.
+func (ins *Instruction) IsKfuncCall() bool {
+ return ins.OpCode.JumpOp() == Call && ins.Src == PseudoKfuncCall
+}
+
+// IsLoadOfFunctionPointer returns true if the instruction loads a function pointer.
+func (ins *Instruction) IsLoadOfFunctionPointer() bool {
+ return ins.OpCode.IsDWordLoad() && ins.Src == PseudoFunc
+}
+
+// IsFunctionReference returns true if the instruction references another BPF
+// function, either by invoking a Call jump operation or by loading a function
+// pointer.
+func (ins *Instruction) IsFunctionReference() bool {
+ return ins.IsFunctionCall() || ins.IsLoadOfFunctionPointer()
+}
+
+// IsBuiltinCall returns true if the instruction is a built-in call, i.e. BPF helper call.
+func (ins *Instruction) IsBuiltinCall() bool {
+ return ins.OpCode.JumpOp() == Call && ins.Src == R0 && ins.Dst == R0
+}
+
+// IsConstantLoad returns true if the instruction loads a constant of the
+// given size.
+func (ins *Instruction) IsConstantLoad(size Size) bool {
+ return ins.OpCode == LoadImmOp(size) && ins.Src == R0 && ins.Offset == 0
+}
+
+// Format implements fmt.Formatter.
+func (ins Instruction) Format(f fmt.State, c rune) {
+ if c != 'v' {
+ fmt.Fprintf(f, "{UNRECOGNIZED: %c}", c)
+ return
+ }
+
+ op := ins.OpCode
+
+ if op == InvalidOpCode {
+ fmt.Fprint(f, "INVALID")
+ return
+ }
+
+ // Omit trailing space for Exit
+ if op.JumpOp() == Exit {
+ fmt.Fprint(f, op)
+ return
+ }
+
+ if ins.IsLoadFromMap() {
+ fd := ins.mapFd()
+ m := ins.Map()
+ switch ins.Src {
+ case PseudoMapFD:
+ if m != nil {
+ fmt.Fprintf(f, "LoadMapPtr dst: %s map: %s", ins.Dst, m)
+ } else {
+ fmt.Fprintf(f, "LoadMapPtr dst: %s fd: %d", ins.Dst, fd)
+ }
+
+ case PseudoMapValue:
+ if m != nil {
+ fmt.Fprintf(f, "LoadMapValue dst: %s, map: %s off: %d", ins.Dst, m, ins.mapOffset())
+ } else {
+ fmt.Fprintf(f, "LoadMapValue dst: %s, fd: %d off: %d", ins.Dst, fd, ins.mapOffset())
+ }
+ }
+
+ goto ref
+ }
+
+ fmt.Fprintf(f, "%v ", op)
+ switch cls := op.Class(); {
+ case cls.isLoadOrStore():
+ switch op.Mode() {
+ case ImmMode:
+ fmt.Fprintf(f, "dst: %s imm: %d", ins.Dst, ins.Constant)
+ case AbsMode:
+ fmt.Fprintf(f, "imm: %d", ins.Constant)
+ case IndMode:
+ fmt.Fprintf(f, "dst: %s src: %s imm: %d", ins.Dst, ins.Src, ins.Constant)
+ case MemMode:
+ fmt.Fprintf(f, "dst: %s src: %s off: %d imm: %d", ins.Dst, ins.Src, ins.Offset, ins.Constant)
+ case XAddMode:
+ fmt.Fprintf(f, "dst: %s src: %s", ins.Dst, ins.Src)
+ }
+
+ case cls.IsALU():
+ fmt.Fprintf(f, "dst: %s ", ins.Dst)
+ if op.ALUOp() == Swap || op.Source() == ImmSource {
+ fmt.Fprintf(f, "imm: %d", ins.Constant)
+ } else {
+ fmt.Fprintf(f, "src: %s", ins.Src)
+ }
+
+ case cls.IsJump():
+ switch jop := op.JumpOp(); jop {
+ case Call:
+ switch ins.Src {
+ case PseudoCall:
+ // bpf-to-bpf call
+ fmt.Fprint(f, ins.Constant)
+ case PseudoKfuncCall:
+ // kfunc call
+ fmt.Fprintf(f, "Kfunc(%d)", ins.Constant)
+ default:
+ fmt.Fprint(f, BuiltinFunc(ins.Constant))
+ }
+
+ default:
+ fmt.Fprintf(f, "dst: %s off: %d ", ins.Dst, ins.Offset)
+ if op.Source() == ImmSource {
+ fmt.Fprintf(f, "imm: %d", ins.Constant)
+ } else {
+ fmt.Fprintf(f, "src: %s", ins.Src)
+ }
+ }
+ }
+
+ref:
+ if ins.Reference() != "" {
+ fmt.Fprintf(f, " <%s>", ins.Reference())
+ }
+}
+
+func (ins Instruction) equal(other Instruction) bool {
+ return ins.OpCode == other.OpCode &&
+ ins.Dst == other.Dst &&
+ ins.Src == other.Src &&
+ ins.Offset == other.Offset &&
+ ins.Constant == other.Constant
+}
+
+// Size returns the amount of bytes ins would occupy in binary form.
+func (ins Instruction) Size() uint64 {
+ return uint64(InstructionSize * ins.OpCode.rawInstructions())
+}
+
+// WithMetadata sets the given Metadata on the Instruction. e.g. to copy
+// Metadata from another Instruction when replacing it.
+func (ins Instruction) WithMetadata(meta Metadata) Instruction {
+ ins.Metadata = meta
+ return ins
+}
+
+type symbolMeta struct{}
+
+// WithSymbol marks the Instruction as a Symbol, which other Instructions
+// can point to using corresponding calls to WithReference.
+func (ins Instruction) WithSymbol(name string) Instruction {
+ ins.Metadata.Set(symbolMeta{}, name)
+ return ins
+}
+
+// Sym creates a symbol.
+//
+// Deprecated: use WithSymbol instead.
+func (ins Instruction) Sym(name string) Instruction {
+ return ins.WithSymbol(name)
+}
+
+// Symbol returns the value ins has been marked with using WithSymbol,
+// otherwise returns an empty string. A symbol is often an Instruction
+// at the start of a function body.
+func (ins Instruction) Symbol() string {
+ sym, _ := ins.Metadata.Get(symbolMeta{}).(string)
+ return sym
+}
+
+type referenceMeta struct{}
+
+// WithReference makes ins reference another Symbol or map by name.
+func (ins Instruction) WithReference(ref string) Instruction {
+ ins.Metadata.Set(referenceMeta{}, ref)
+ return ins
+}
+
+// Reference returns the Symbol or map name referenced by ins, if any.
+func (ins Instruction) Reference() string {
+ ref, _ := ins.Metadata.Get(referenceMeta{}).(string)
+ return ref
+}
+
+type mapMeta struct{}
+
+// Map returns the Map referenced by ins, if any.
+// An Instruction will contain a Map if e.g. it references an existing,
+// pinned map that was opened during ELF loading.
+func (ins Instruction) Map() FDer {
+ fd, _ := ins.Metadata.Get(mapMeta{}).(FDer)
+ return fd
+}
+
+type sourceMeta struct{}
+
+// WithSource adds source information about the Instruction.
+func (ins Instruction) WithSource(src fmt.Stringer) Instruction {
+ ins.Metadata.Set(sourceMeta{}, src)
+ return ins
+}
+
+// Source returns source information about the Instruction. The field is
+// present when the compiler emits BTF line info about the Instruction and
+// usually contains the line of source code responsible for it.
+func (ins Instruction) Source() fmt.Stringer {
+ str, _ := ins.Metadata.Get(sourceMeta{}).(fmt.Stringer)
+ return str
+}
+
+// A Comment can be passed to Instruction.WithSource to add a comment
+// to an instruction.
+type Comment string
+
+func (s Comment) String() string {
+ return string(s)
+}
+
+// FDer represents a resource tied to an underlying file descriptor.
+// Used as a stand-in for e.g. ebpf.Map since that type cannot be
+// imported here and FD() is the only method we rely on.
+type FDer interface {
+ FD() int
+}
+
+// Instructions is an eBPF program.
+type Instructions []Instruction
+
+// Unmarshal unmarshals an Instructions from a binary instruction stream.
+// All instructions in insns are replaced by instructions decoded from r.
+func (insns *Instructions) Unmarshal(r io.Reader, bo binary.ByteOrder) error {
+ if len(*insns) > 0 {
+ *insns = nil
+ }
+
+ var offset uint64
+ for {
+ var ins Instruction
+ n, err := ins.Unmarshal(r, bo)
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ if err != nil {
+ return fmt.Errorf("offset %d: %w", offset, err)
+ }
+
+ *insns = append(*insns, ins)
+ offset += n
+ }
+
+ return nil
+}
+
+// Name returns the name of the function insns belongs to, if any.
+func (insns Instructions) Name() string {
+ if len(insns) == 0 {
+ return ""
+ }
+ return insns[0].Symbol()
+}
+
+func (insns Instructions) String() string {
+ return fmt.Sprint(insns)
+}
+
+// Size returns the amount of bytes insns would occupy in binary form.
+func (insns Instructions) Size() uint64 {
+ var sum uint64
+ for _, ins := range insns {
+ sum += ins.Size()
+ }
+ return sum
+}
+
+// AssociateMap updates all Instructions that Reference the given symbol
+// to point to an existing Map m instead.
+//
+// Returns ErrUnreferencedSymbol error if no references to symbol are found
+// in insns. If symbol is anything else than the symbol name of map (e.g.
+// a bpf2bpf subprogram), an error is returned.
+func (insns Instructions) AssociateMap(symbol string, m FDer) error {
+ if symbol == "" {
+ return errors.New("empty symbol")
+ }
+
+ var found bool
+ for i := range insns {
+ ins := &insns[i]
+ if ins.Reference() != symbol {
+ continue
+ }
+
+ if err := ins.AssociateMap(m); err != nil {
+ return err
+ }
+
+ found = true
+ }
+
+ if !found {
+ return fmt.Errorf("symbol %s: %w", symbol, ErrUnreferencedSymbol)
+ }
+
+ return nil
+}
+
+// RewriteMapPtr rewrites all loads of a specific map pointer to a new fd.
+//
+// Returns ErrUnreferencedSymbol if the symbol isn't used.
+//
+// Deprecated: use AssociateMap instead.
+func (insns Instructions) RewriteMapPtr(symbol string, fd int) error {
+ if symbol == "" {
+ return errors.New("empty symbol")
+ }
+
+ var found bool
+ for i := range insns {
+ ins := &insns[i]
+ if ins.Reference() != symbol {
+ continue
+ }
+
+ if !ins.IsLoadFromMap() {
+ return errors.New("not a load from a map")
+ }
+
+ ins.encodeMapFD(fd)
+
+ found = true
+ }
+
+ if !found {
+ return fmt.Errorf("symbol %s: %w", symbol, ErrUnreferencedSymbol)
+ }
+
+ return nil
+}
+
+// SymbolOffsets returns the set of symbols and their offset in
+// the instructions.
+func (insns Instructions) SymbolOffsets() (map[string]int, error) {
+ offsets := make(map[string]int)
+
+ for i, ins := range insns {
+ if ins.Symbol() == "" {
+ continue
+ }
+
+ if _, ok := offsets[ins.Symbol()]; ok {
+ return nil, fmt.Errorf("duplicate symbol %s", ins.Symbol())
+ }
+
+ offsets[ins.Symbol()] = i
+ }
+
+ return offsets, nil
+}
+
+// FunctionReferences returns a set of symbol names these Instructions make
+// bpf-to-bpf calls to.
+func (insns Instructions) FunctionReferences() []string {
+ calls := make(map[string]struct{})
+ for _, ins := range insns {
+ if ins.Constant != -1 {
+ // BPF-to-BPF calls have -1 constants.
+ continue
+ }
+
+ if ins.Reference() == "" {
+ continue
+ }
+
+ if !ins.IsFunctionReference() {
+ continue
+ }
+
+ calls[ins.Reference()] = struct{}{}
+ }
+
+ result := make([]string, 0, len(calls))
+ for call := range calls {
+ result = append(result, call)
+ }
+
+ sort.Strings(result)
+ return result
+}
+
+// ReferenceOffsets returns the set of references and their offset in
+// the instructions.
+func (insns Instructions) ReferenceOffsets() map[string][]int {
+ offsets := make(map[string][]int)
+
+ for i, ins := range insns {
+ if ins.Reference() == "" {
+ continue
+ }
+
+ offsets[ins.Reference()] = append(offsets[ins.Reference()], i)
+ }
+
+ return offsets
+}
+
+// Format implements fmt.Formatter.
+//
+// You can control indentation of symbols by
+// specifying a width. Setting a precision controls the indentation of
+// instructions.
+// The default character is a tab, which can be overridden by specifying
+// the ' ' space flag.
+func (insns Instructions) Format(f fmt.State, c rune) {
+ if c != 's' && c != 'v' {
+ fmt.Fprintf(f, "{UNKNOWN FORMAT '%c'}", c)
+ return
+ }
+
+ // Precision is better in this case, because it allows
+ // specifying 0 padding easily.
+ padding, ok := f.Precision()
+ if !ok {
+ padding = 1
+ }
+
+ indent := strings.Repeat("\t", padding)
+ if f.Flag(' ') {
+ indent = strings.Repeat(" ", padding)
+ }
+
+ symPadding, ok := f.Width()
+ if !ok {
+ symPadding = padding - 1
+ }
+ if symPadding < 0 {
+ symPadding = 0
+ }
+
+ symIndent := strings.Repeat("\t", symPadding)
+ if f.Flag(' ') {
+ symIndent = strings.Repeat(" ", symPadding)
+ }
+
+ // Guess how many digits we need at most, by assuming that all instructions
+ // are double wide.
+ highestOffset := len(insns) * 2
+ offsetWidth := int(math.Ceil(math.Log10(float64(highestOffset))))
+
+ iter := insns.Iterate()
+ for iter.Next() {
+ if iter.Ins.Symbol() != "" {
+ fmt.Fprintf(f, "%s%s:\n", symIndent, iter.Ins.Symbol())
+ }
+ if src := iter.Ins.Source(); src != nil {
+ line := strings.TrimSpace(src.String())
+ if line != "" {
+ fmt.Fprintf(f, "%s%*s; %s\n", indent, offsetWidth, " ", line)
+ }
+ }
+ fmt.Fprintf(f, "%s%*d: %v\n", indent, offsetWidth, iter.Offset, iter.Ins)
+ }
+}
+
+// Marshal encodes a BPF program into the kernel format.
+//
+// insns may be modified if there are unresolved jumps or bpf2bpf calls.
+//
+// Returns ErrUnsatisfiedProgramReference if there is a Reference Instruction
+// without a matching Symbol Instruction within insns.
+func (insns Instructions) Marshal(w io.Writer, bo binary.ByteOrder) error {
+ if err := insns.encodeFunctionReferences(); err != nil {
+ return err
+ }
+
+ if err := insns.encodeMapPointers(); err != nil {
+ return err
+ }
+
+ for i, ins := range insns {
+ if _, err := ins.Marshal(w, bo); err != nil {
+ return fmt.Errorf("instruction %d: %w", i, err)
+ }
+ }
+ return nil
+}
+
+// Tag calculates the kernel tag for a series of instructions.
+//
+// It mirrors bpf_prog_calc_tag in the kernel and so can be compared
+// to ProgramInfo.Tag to figure out whether a loaded program matches
+// certain instructions.
+func (insns Instructions) Tag(bo binary.ByteOrder) (string, error) {
+ h := sha1.New()
+ for i, ins := range insns {
+ if ins.IsLoadFromMap() {
+ ins.Constant = 0
+ }
+ _, err := ins.Marshal(h, bo)
+ if err != nil {
+ return "", fmt.Errorf("instruction %d: %w", i, err)
+ }
+ }
+ return hex.EncodeToString(h.Sum(nil)[:unix.BPF_TAG_SIZE]), nil
+}
+
+// encodeFunctionReferences populates the Offset (or Constant, depending on
+// the instruction type) field of instructions with a Reference field to point
+// to the offset of the corresponding instruction with a matching Symbol field.
+//
+// Only Reference Instructions that are either jumps or BPF function references
+// (calls or function pointer loads) are populated.
+//
+// Returns ErrUnsatisfiedProgramReference if there is a Reference Instruction
+// without at least one corresponding Symbol Instruction within insns.
+func (insns Instructions) encodeFunctionReferences() error {
+ // Index the offsets of instructions tagged as a symbol.
+ symbolOffsets := make(map[string]RawInstructionOffset)
+ iter := insns.Iterate()
+ for iter.Next() {
+ ins := iter.Ins
+
+ if ins.Symbol() == "" {
+ continue
+ }
+
+ if _, ok := symbolOffsets[ins.Symbol()]; ok {
+ return fmt.Errorf("duplicate symbol %s", ins.Symbol())
+ }
+
+ symbolOffsets[ins.Symbol()] = iter.Offset
+ }
+
+ // Find all instructions tagged as references to other symbols.
+ // Depending on the instruction type, populate their constant or offset
+ // fields to point to the symbol they refer to within the insn stream.
+ iter = insns.Iterate()
+ for iter.Next() {
+ i := iter.Index
+ offset := iter.Offset
+ ins := iter.Ins
+
+ if ins.Reference() == "" {
+ continue
+ }
+
+ switch {
+ case ins.IsFunctionReference() && ins.Constant == -1:
+ symOffset, ok := symbolOffsets[ins.Reference()]
+ if !ok {
+ return fmt.Errorf("%s at insn %d: symbol %q: %w", ins.OpCode, i, ins.Reference(), ErrUnsatisfiedProgramReference)
+ }
+
+ ins.Constant = int64(symOffset - offset - 1)
+
+ case ins.OpCode.Class().IsJump() && ins.Offset == -1:
+ symOffset, ok := symbolOffsets[ins.Reference()]
+ if !ok {
+ return fmt.Errorf("%s at insn %d: symbol %q: %w", ins.OpCode, i, ins.Reference(), ErrUnsatisfiedProgramReference)
+ }
+
+ ins.Offset = int16(symOffset - offset - 1)
+ }
+ }
+
+ return nil
+}
+
+// encodeMapPointers finds all Map Instructions and encodes their FDs
+// into their Constant fields.
+func (insns Instructions) encodeMapPointers() error {
+ iter := insns.Iterate()
+ for iter.Next() {
+ ins := iter.Ins
+
+ if !ins.IsLoadFromMap() {
+ continue
+ }
+
+ m := ins.Map()
+ if m == nil {
+ continue
+ }
+
+ fd := m.FD()
+ if fd < 0 {
+ return fmt.Errorf("map %s: %w", m, sys.ErrClosedFd)
+ }
+
+ ins.encodeMapFD(m.FD())
+ }
+
+ return nil
+}
+
+// Iterate allows iterating a BPF program while keeping track of
+// various offsets.
+//
+// Modifying the instruction slice will lead to undefined behaviour.
+func (insns Instructions) Iterate() *InstructionIterator {
+ return &InstructionIterator{insns: insns}
+}
+
+// InstructionIterator iterates over a BPF program.
+type InstructionIterator struct {
+ insns Instructions
+ // The instruction in question.
+ Ins *Instruction
+ // The index of the instruction in the original instruction slice.
+ Index int
+ // The offset of the instruction in raw BPF instructions. This accounts
+ // for double-wide instructions.
+ Offset RawInstructionOffset
+}
+
+// Next returns true as long as there are any instructions remaining.
+func (iter *InstructionIterator) Next() bool {
+ if len(iter.insns) == 0 {
+ return false
+ }
+
+ if iter.Ins != nil {
+ iter.Index++
+ iter.Offset += RawInstructionOffset(iter.Ins.OpCode.rawInstructions())
+ }
+ iter.Ins = &iter.insns[0]
+ iter.insns = iter.insns[1:]
+ return true
+}
+
+type bpfRegisters uint8
+
+func newBPFRegisters(dst, src Register, bo binary.ByteOrder) (bpfRegisters, error) {
+ switch bo {
+ case binary.LittleEndian:
+ return bpfRegisters((src << 4) | (dst & 0xF)), nil
+ case binary.BigEndian:
+ return bpfRegisters((dst << 4) | (src & 0xF)), nil
+ default:
+ return 0, fmt.Errorf("unrecognized ByteOrder %T", bo)
+ }
+}
+
+// IsUnreferencedSymbol returns true if err was caused by
+// an unreferenced symbol.
+//
+// Deprecated: use errors.Is(err, asm.ErrUnreferencedSymbol).
+func IsUnreferencedSymbol(err error) bool {
+ return errors.Is(err, ErrUnreferencedSymbol)
+}
diff --git a/vendor/github.com/cilium/ebpf/asm/jump.go b/vendor/github.com/cilium/ebpf/asm/jump.go
new file mode 100644
index 000000000..9a525b21a
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/asm/jump.go
@@ -0,0 +1,127 @@
+package asm
+
+//go:generate go run golang.org/x/tools/cmd/stringer@latest -output jump_string.go -type=JumpOp
+
+// JumpOp affect control flow.
+//
+// msb lsb
+// +----+-+---+
+// |OP |s|cls|
+// +----+-+---+
+type JumpOp uint8
+
+const jumpMask OpCode = aluMask
+
+const (
+ // InvalidJumpOp is returned by getters when invoked
+ // on non branch OpCodes
+ InvalidJumpOp JumpOp = 0xff
+ // Ja jumps by offset unconditionally
+ Ja JumpOp = 0x00
+ // JEq jumps by offset if r == imm
+ JEq JumpOp = 0x10
+ // JGT jumps by offset if r > imm
+ JGT JumpOp = 0x20
+ // JGE jumps by offset if r >= imm
+ JGE JumpOp = 0x30
+ // JSet jumps by offset if r & imm
+ JSet JumpOp = 0x40
+ // JNE jumps by offset if r != imm
+ JNE JumpOp = 0x50
+ // JSGT jumps by offset if signed r > signed imm
+ JSGT JumpOp = 0x60
+ // JSGE jumps by offset if signed r >= signed imm
+ JSGE JumpOp = 0x70
+ // Call builtin or user defined function from imm
+ Call JumpOp = 0x80
+ // Exit ends execution, with value in r0
+ Exit JumpOp = 0x90
+ // JLT jumps by offset if r < imm
+ JLT JumpOp = 0xa0
+ // JLE jumps by offset if r <= imm
+ JLE JumpOp = 0xb0
+ // JSLT jumps by offset if signed r < signed imm
+ JSLT JumpOp = 0xc0
+ // JSLE jumps by offset if signed r <= signed imm
+ JSLE JumpOp = 0xd0
+)
+
+// Return emits an exit instruction.
+//
+// Requires a return value in R0.
+func Return() Instruction {
+ return Instruction{
+ OpCode: OpCode(JumpClass).SetJumpOp(Exit),
+ }
+}
+
+// Op returns the OpCode for a given jump source.
+func (op JumpOp) Op(source Source) OpCode {
+ return OpCode(JumpClass).SetJumpOp(op).SetSource(source)
+}
+
+// Imm compares 64 bit dst to 64 bit value (sign extended), and adjusts PC by offset if the condition is fulfilled.
+func (op JumpOp) Imm(dst Register, value int32, label string) Instruction {
+ return Instruction{
+ OpCode: op.opCode(JumpClass, ImmSource),
+ Dst: dst,
+ Offset: -1,
+ Constant: int64(value),
+ }.WithReference(label)
+}
+
+// Imm32 compares 32 bit dst to 32 bit value, and adjusts PC by offset if the condition is fulfilled.
+// Requires kernel 5.1.
+func (op JumpOp) Imm32(dst Register, value int32, label string) Instruction {
+ return Instruction{
+ OpCode: op.opCode(Jump32Class, ImmSource),
+ Dst: dst,
+ Offset: -1,
+ Constant: int64(value),
+ }.WithReference(label)
+}
+
+// Reg compares 64 bit dst to 64 bit src, and adjusts PC by offset if the condition is fulfilled.
+func (op JumpOp) Reg(dst, src Register, label string) Instruction {
+ return Instruction{
+ OpCode: op.opCode(JumpClass, RegSource),
+ Dst: dst,
+ Src: src,
+ Offset: -1,
+ }.WithReference(label)
+}
+
+// Reg32 compares 32 bit dst to 32 bit src, and adjusts PC by offset if the condition is fulfilled.
+// Requires kernel 5.1.
+func (op JumpOp) Reg32(dst, src Register, label string) Instruction {
+ return Instruction{
+ OpCode: op.opCode(Jump32Class, RegSource),
+ Dst: dst,
+ Src: src,
+ Offset: -1,
+ }.WithReference(label)
+}
+
+func (op JumpOp) opCode(class Class, source Source) OpCode {
+ if op == Exit || op == Call || op == Ja {
+ return InvalidOpCode
+ }
+
+ return OpCode(class).SetJumpOp(op).SetSource(source)
+}
+
+// Label adjusts PC to the address of the label.
+func (op JumpOp) Label(label string) Instruction {
+ if op == Call {
+ return Instruction{
+ OpCode: OpCode(JumpClass).SetJumpOp(Call),
+ Src: PseudoCall,
+ Constant: -1,
+ }.WithReference(label)
+ }
+
+ return Instruction{
+ OpCode: OpCode(JumpClass).SetJumpOp(op),
+ Offset: -1,
+ }.WithReference(label)
+}
diff --git a/vendor/github.com/cilium/ebpf/asm/jump_string.go b/vendor/github.com/cilium/ebpf/asm/jump_string.go
new file mode 100644
index 000000000..85a4aaffa
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/asm/jump_string.go
@@ -0,0 +1,53 @@
+// Code generated by "stringer -output jump_string.go -type=JumpOp"; DO NOT EDIT.
+
+package asm
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[InvalidJumpOp-255]
+ _ = x[Ja-0]
+ _ = x[JEq-16]
+ _ = x[JGT-32]
+ _ = x[JGE-48]
+ _ = x[JSet-64]
+ _ = x[JNE-80]
+ _ = x[JSGT-96]
+ _ = x[JSGE-112]
+ _ = x[Call-128]
+ _ = x[Exit-144]
+ _ = x[JLT-160]
+ _ = x[JLE-176]
+ _ = x[JSLT-192]
+ _ = x[JSLE-208]
+}
+
+const _JumpOp_name = "JaJEqJGTJGEJSetJNEJSGTJSGECallExitJLTJLEJSLTJSLEInvalidJumpOp"
+
+var _JumpOp_map = map[JumpOp]string{
+ 0: _JumpOp_name[0:2],
+ 16: _JumpOp_name[2:5],
+ 32: _JumpOp_name[5:8],
+ 48: _JumpOp_name[8:11],
+ 64: _JumpOp_name[11:15],
+ 80: _JumpOp_name[15:18],
+ 96: _JumpOp_name[18:22],
+ 112: _JumpOp_name[22:26],
+ 128: _JumpOp_name[26:30],
+ 144: _JumpOp_name[30:34],
+ 160: _JumpOp_name[34:37],
+ 176: _JumpOp_name[37:40],
+ 192: _JumpOp_name[40:44],
+ 208: _JumpOp_name[44:48],
+ 255: _JumpOp_name[48:61],
+}
+
+func (i JumpOp) String() string {
+ if str, ok := _JumpOp_map[i]; ok {
+ return str
+ }
+ return "JumpOp(" + strconv.FormatInt(int64(i), 10) + ")"
+}
diff --git a/vendor/github.com/cilium/ebpf/asm/load_store.go b/vendor/github.com/cilium/ebpf/asm/load_store.go
new file mode 100644
index 000000000..574ee377c
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/asm/load_store.go
@@ -0,0 +1,204 @@
+package asm
+
+//go:generate go run golang.org/x/tools/cmd/stringer@latest -output load_store_string.go -type=Mode,Size
+
+// Mode for load and store operations
+//
+// msb lsb
+// +---+--+---+
+// |MDE|sz|cls|
+// +---+--+---+
+type Mode uint8
+
+const modeMask OpCode = 0xe0
+
+const (
+ // InvalidMode is returned by getters when invoked
+ // on non load / store OpCodes
+ InvalidMode Mode = 0xff
+ // ImmMode - immediate value
+ ImmMode Mode = 0x00
+ // AbsMode - immediate value + offset
+ AbsMode Mode = 0x20
+ // IndMode - indirect (imm+src)
+ IndMode Mode = 0x40
+ // MemMode - load from memory
+ MemMode Mode = 0x60
+ // XAddMode - add atomically across processors.
+ XAddMode Mode = 0xc0
+)
+
+// Size of load and store operations
+//
+// msb lsb
+// +---+--+---+
+// |mde|SZ|cls|
+// +---+--+---+
+type Size uint8
+
+const sizeMask OpCode = 0x18
+
+const (
+ // InvalidSize is returned by getters when invoked
+ // on non load / store OpCodes
+ InvalidSize Size = 0xff
+ // DWord - double word; 64 bits
+ DWord Size = 0x18
+ // Word - word; 32 bits
+ Word Size = 0x00
+ // Half - half-word; 16 bits
+ Half Size = 0x08
+ // Byte - byte; 8 bits
+ Byte Size = 0x10
+)
+
+// Sizeof returns the size in bytes.
+func (s Size) Sizeof() int {
+ switch s {
+ case DWord:
+ return 8
+ case Word:
+ return 4
+ case Half:
+ return 2
+ case Byte:
+ return 1
+ default:
+ return -1
+ }
+}
+
+// LoadMemOp returns the OpCode to load a value of given size from memory.
+func LoadMemOp(size Size) OpCode {
+ return OpCode(LdXClass).SetMode(MemMode).SetSize(size)
+}
+
+// LoadMem emits `dst = *(size *)(src + offset)`.
+func LoadMem(dst, src Register, offset int16, size Size) Instruction {
+ return Instruction{
+ OpCode: LoadMemOp(size),
+ Dst: dst,
+ Src: src,
+ Offset: offset,
+ }
+}
+
+// LoadImmOp returns the OpCode to load an immediate of given size.
+//
+// As of kernel 4.20, only DWord size is accepted.
+func LoadImmOp(size Size) OpCode {
+ return OpCode(LdClass).SetMode(ImmMode).SetSize(size)
+}
+
+// LoadImm emits `dst = (size)value`.
+//
+// As of kernel 4.20, only DWord size is accepted.
+func LoadImm(dst Register, value int64, size Size) Instruction {
+ return Instruction{
+ OpCode: LoadImmOp(size),
+ Dst: dst,
+ Constant: value,
+ }
+}
+
+// LoadMapPtr stores a pointer to a map in dst.
+func LoadMapPtr(dst Register, fd int) Instruction {
+ if fd < 0 {
+ return Instruction{OpCode: InvalidOpCode}
+ }
+
+ return Instruction{
+ OpCode: LoadImmOp(DWord),
+ Dst: dst,
+ Src: PseudoMapFD,
+ Constant: int64(uint32(fd)),
+ }
+}
+
+// LoadMapValue stores a pointer to the value at a certain offset of a map.
+func LoadMapValue(dst Register, fd int, offset uint32) Instruction {
+ if fd < 0 {
+ return Instruction{OpCode: InvalidOpCode}
+ }
+
+ fdAndOffset := (uint64(offset) << 32) | uint64(uint32(fd))
+ return Instruction{
+ OpCode: LoadImmOp(DWord),
+ Dst: dst,
+ Src: PseudoMapValue,
+ Constant: int64(fdAndOffset),
+ }
+}
+
+// LoadIndOp returns the OpCode for loading a value of given size from an sk_buff.
+func LoadIndOp(size Size) OpCode {
+ return OpCode(LdClass).SetMode(IndMode).SetSize(size)
+}
+
+// LoadInd emits `dst = ntoh(*(size *)(((sk_buff *)R6)->data + src + offset))`.
+func LoadInd(dst, src Register, offset int32, size Size) Instruction {
+ return Instruction{
+ OpCode: LoadIndOp(size),
+ Dst: dst,
+ Src: src,
+ Constant: int64(offset),
+ }
+}
+
+// LoadAbsOp returns the OpCode for loading a value of given size from an sk_buff.
+func LoadAbsOp(size Size) OpCode {
+ return OpCode(LdClass).SetMode(AbsMode).SetSize(size)
+}
+
+// LoadAbs emits `r0 = ntoh(*(size *)(((sk_buff *)R6)->data + offset))`.
+func LoadAbs(offset int32, size Size) Instruction {
+ return Instruction{
+ OpCode: LoadAbsOp(size),
+ Dst: R0,
+ Constant: int64(offset),
+ }
+}
+
+// StoreMemOp returns the OpCode for storing a register of given size in memory.
+func StoreMemOp(size Size) OpCode {
+ return OpCode(StXClass).SetMode(MemMode).SetSize(size)
+}
+
+// StoreMem emits `*(size *)(dst + offset) = src`
+func StoreMem(dst Register, offset int16, src Register, size Size) Instruction {
+ return Instruction{
+ OpCode: StoreMemOp(size),
+ Dst: dst,
+ Src: src,
+ Offset: offset,
+ }
+}
+
+// StoreImmOp returns the OpCode for storing an immediate of given size in memory.
+func StoreImmOp(size Size) OpCode {
+ return OpCode(StClass).SetMode(MemMode).SetSize(size)
+}
+
+// StoreImm emits `*(size *)(dst + offset) = value`.
+func StoreImm(dst Register, offset int16, value int64, size Size) Instruction {
+ return Instruction{
+ OpCode: StoreImmOp(size),
+ Dst: dst,
+ Offset: offset,
+ Constant: value,
+ }
+}
+
+// StoreXAddOp returns the OpCode to atomically add a register to a value in memory.
+func StoreXAddOp(size Size) OpCode {
+ return OpCode(StXClass).SetMode(XAddMode).SetSize(size)
+}
+
+// StoreXAdd atomically adds src to *dst.
+func StoreXAdd(dst, src Register, size Size) Instruction {
+ return Instruction{
+ OpCode: StoreXAddOp(size),
+ Dst: dst,
+ Src: src,
+ }
+}
diff --git a/vendor/github.com/cilium/ebpf/asm/load_store_string.go b/vendor/github.com/cilium/ebpf/asm/load_store_string.go
new file mode 100644
index 000000000..76d29a075
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/asm/load_store_string.go
@@ -0,0 +1,80 @@
+// Code generated by "stringer -output load_store_string.go -type=Mode,Size"; DO NOT EDIT.
+
+package asm
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[InvalidMode-255]
+ _ = x[ImmMode-0]
+ _ = x[AbsMode-32]
+ _ = x[IndMode-64]
+ _ = x[MemMode-96]
+ _ = x[XAddMode-192]
+}
+
+const (
+ _Mode_name_0 = "ImmMode"
+ _Mode_name_1 = "AbsMode"
+ _Mode_name_2 = "IndMode"
+ _Mode_name_3 = "MemMode"
+ _Mode_name_4 = "XAddMode"
+ _Mode_name_5 = "InvalidMode"
+)
+
+func (i Mode) String() string {
+ switch {
+ case i == 0:
+ return _Mode_name_0
+ case i == 32:
+ return _Mode_name_1
+ case i == 64:
+ return _Mode_name_2
+ case i == 96:
+ return _Mode_name_3
+ case i == 192:
+ return _Mode_name_4
+ case i == 255:
+ return _Mode_name_5
+ default:
+ return "Mode(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+}
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[InvalidSize-255]
+ _ = x[DWord-24]
+ _ = x[Word-0]
+ _ = x[Half-8]
+ _ = x[Byte-16]
+}
+
+const (
+ _Size_name_0 = "Word"
+ _Size_name_1 = "Half"
+ _Size_name_2 = "Byte"
+ _Size_name_3 = "DWord"
+ _Size_name_4 = "InvalidSize"
+)
+
+func (i Size) String() string {
+ switch {
+ case i == 0:
+ return _Size_name_0
+ case i == 8:
+ return _Size_name_1
+ case i == 16:
+ return _Size_name_2
+ case i == 24:
+ return _Size_name_3
+ case i == 255:
+ return _Size_name_4
+ default:
+ return "Size(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+}
diff --git a/vendor/github.com/cilium/ebpf/asm/metadata.go b/vendor/github.com/cilium/ebpf/asm/metadata.go
new file mode 100644
index 000000000..dd368a936
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/asm/metadata.go
@@ -0,0 +1,80 @@
+package asm
+
+// Metadata contains metadata about an instruction.
+type Metadata struct {
+ head *metaElement
+}
+
+type metaElement struct {
+ next *metaElement
+ key, value interface{}
+}
+
+// Find the element containing key.
+//
+// Returns nil if there is no such element.
+func (m *Metadata) find(key interface{}) *metaElement {
+ for e := m.head; e != nil; e = e.next {
+ if e.key == key {
+ return e
+ }
+ }
+ return nil
+}
+
+// Remove an element from the linked list.
+//
+// Copies as many elements of the list as necessary to remove r, but doesn't
+// perform a full copy.
+func (m *Metadata) remove(r *metaElement) {
+ current := &m.head
+ for e := m.head; e != nil; e = e.next {
+ if e == r {
+ // We've found the element we want to remove.
+ *current = e.next
+
+ // No need to copy the tail.
+ return
+ }
+
+ // There is another element in front of the one we want to remove.
+ // We have to copy it to be able to change metaElement.next.
+ cpy := &metaElement{key: e.key, value: e.value}
+ *current = cpy
+ current = &cpy.next
+ }
+}
+
+// Set a key to a value.
+//
+// If value is nil, the key is removed. Avoids modifying old metadata by
+// copying if necessary.
+func (m *Metadata) Set(key, value interface{}) {
+ if e := m.find(key); e != nil {
+ if e.value == value {
+ // Key is present and the value is the same. Nothing to do.
+ return
+ }
+
+ // Key is present with a different value. Create a copy of the list
+ // which doesn't have the element in it.
+ m.remove(e)
+ }
+
+ // m.head is now a linked list that doesn't contain key.
+ if value == nil {
+ return
+ }
+
+ m.head = &metaElement{key: key, value: value, next: m.head}
+}
+
+// Get the value of a key.
+//
+// Returns nil if no value with the given key is present.
+func (m *Metadata) Get(key interface{}) interface{} {
+ if e := m.find(key); e != nil {
+ return e.value
+ }
+ return nil
+}
diff --git a/vendor/github.com/cilium/ebpf/asm/opcode.go b/vendor/github.com/cilium/ebpf/asm/opcode.go
new file mode 100644
index 000000000..845c5521f
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/asm/opcode.go
@@ -0,0 +1,271 @@
+package asm
+
+import (
+ "fmt"
+ "strings"
+)
+
+//go:generate go run golang.org/x/tools/cmd/stringer@latest -output opcode_string.go -type=Class
+
+// Class of operations
+//
+// msb lsb
+// +---+--+---+
+// | ?? |CLS|
+// +---+--+---+
+type Class uint8
+
+const classMask OpCode = 0x07
+
+const (
+ // LdClass loads immediate values into registers.
+ // Also used for non-standard load operations from cBPF.
+ LdClass Class = 0x00
+ // LdXClass loads memory into registers.
+ LdXClass Class = 0x01
+ // StClass stores immediate values to memory.
+ StClass Class = 0x02
+ // StXClass stores registers to memory.
+ StXClass Class = 0x03
+ // ALUClass describes arithmetic operators.
+ ALUClass Class = 0x04
+ // JumpClass describes jump operators.
+ JumpClass Class = 0x05
+ // Jump32Class describes jump operators with 32-bit comparisons.
+ // Requires kernel 5.1.
+ Jump32Class Class = 0x06
+ // ALU64Class describes arithmetic operators in 64-bit mode.
+ ALU64Class Class = 0x07
+)
+
+// IsLoad checks if this is either LdClass or LdXClass.
+func (cls Class) IsLoad() bool {
+ return cls == LdClass || cls == LdXClass
+}
+
+// IsStore checks if this is either StClass or StXClass.
+func (cls Class) IsStore() bool {
+ return cls == StClass || cls == StXClass
+}
+
+func (cls Class) isLoadOrStore() bool {
+ return cls.IsLoad() || cls.IsStore()
+}
+
+// IsALU checks if this is either ALUClass or ALU64Class.
+func (cls Class) IsALU() bool {
+ return cls == ALUClass || cls == ALU64Class
+}
+
+// IsJump checks if this is either JumpClass or Jump32Class.
+func (cls Class) IsJump() bool {
+ return cls == JumpClass || cls == Jump32Class
+}
+
+func (cls Class) isJumpOrALU() bool {
+ return cls.IsJump() || cls.IsALU()
+}
+
+// OpCode is a packed eBPF opcode.
+//
+// Its encoding is defined by a Class value:
+//
+// msb lsb
+// +----+-+---+
+// | ???? |CLS|
+// +----+-+---+
+type OpCode uint8
+
+// InvalidOpCode is returned by setters on OpCode
+const InvalidOpCode OpCode = 0xff
+
+// rawInstructions returns the number of BPF instructions required
+// to encode this opcode.
+func (op OpCode) rawInstructions() int {
+ if op.IsDWordLoad() {
+ return 2
+ }
+ return 1
+}
+
+func (op OpCode) IsDWordLoad() bool {
+ return op == LoadImmOp(DWord)
+}
+
+// Class returns the class of operation.
+func (op OpCode) Class() Class {
+ return Class(op & classMask)
+}
+
+// Mode returns the mode for load and store operations.
+func (op OpCode) Mode() Mode {
+ if !op.Class().isLoadOrStore() {
+ return InvalidMode
+ }
+ return Mode(op & modeMask)
+}
+
+// Size returns the size for load and store operations.
+func (op OpCode) Size() Size {
+ if !op.Class().isLoadOrStore() {
+ return InvalidSize
+ }
+ return Size(op & sizeMask)
+}
+
+// Source returns the source for branch and ALU operations.
+func (op OpCode) Source() Source {
+ if !op.Class().isJumpOrALU() || op.ALUOp() == Swap {
+ return InvalidSource
+ }
+ return Source(op & sourceMask)
+}
+
+// ALUOp returns the ALUOp.
+func (op OpCode) ALUOp() ALUOp {
+ if !op.Class().IsALU() {
+ return InvalidALUOp
+ }
+ return ALUOp(op & aluMask)
+}
+
+// Endianness returns the Endianness for a byte swap instruction.
+func (op OpCode) Endianness() Endianness {
+ if op.ALUOp() != Swap {
+ return InvalidEndian
+ }
+ return Endianness(op & endianMask)
+}
+
+// JumpOp returns the JumpOp.
+// Returns InvalidJumpOp if it doesn't encode a jump.
+func (op OpCode) JumpOp() JumpOp {
+ if !op.Class().IsJump() {
+ return InvalidJumpOp
+ }
+
+ jumpOp := JumpOp(op & jumpMask)
+
+ // Some JumpOps are only supported by JumpClass, not Jump32Class.
+ if op.Class() == Jump32Class && (jumpOp == Exit || jumpOp == Call || jumpOp == Ja) {
+ return InvalidJumpOp
+ }
+
+ return jumpOp
+}
+
+// SetMode sets the mode on load and store operations.
+//
+// Returns InvalidOpCode if op is of the wrong class.
+func (op OpCode) SetMode(mode Mode) OpCode {
+ if !op.Class().isLoadOrStore() || !valid(OpCode(mode), modeMask) {
+ return InvalidOpCode
+ }
+ return (op & ^modeMask) | OpCode(mode)
+}
+
+// SetSize sets the size on load and store operations.
+//
+// Returns InvalidOpCode if op is of the wrong class.
+func (op OpCode) SetSize(size Size) OpCode {
+ if !op.Class().isLoadOrStore() || !valid(OpCode(size), sizeMask) {
+ return InvalidOpCode
+ }
+ return (op & ^sizeMask) | OpCode(size)
+}
+
+// SetSource sets the source on jump and ALU operations.
+//
+// Returns InvalidOpCode if op is of the wrong class.
+func (op OpCode) SetSource(source Source) OpCode {
+ if !op.Class().isJumpOrALU() || !valid(OpCode(source), sourceMask) {
+ return InvalidOpCode
+ }
+ return (op & ^sourceMask) | OpCode(source)
+}
+
+// SetALUOp sets the ALUOp on ALU operations.
+//
+// Returns InvalidOpCode if op is of the wrong class.
+func (op OpCode) SetALUOp(alu ALUOp) OpCode {
+ if !op.Class().IsALU() || !valid(OpCode(alu), aluMask) {
+ return InvalidOpCode
+ }
+ return (op & ^aluMask) | OpCode(alu)
+}
+
+// SetJumpOp sets the JumpOp on jump operations.
+//
+// Returns InvalidOpCode if op is of the wrong class.
+func (op OpCode) SetJumpOp(jump JumpOp) OpCode {
+ if !op.Class().IsJump() || !valid(OpCode(jump), jumpMask) {
+ return InvalidOpCode
+ }
+
+ newOp := (op & ^jumpMask) | OpCode(jump)
+
+ // Check newOp is legal.
+ if newOp.JumpOp() == InvalidJumpOp {
+ return InvalidOpCode
+ }
+
+ return newOp
+}
+
+func (op OpCode) String() string {
+ var f strings.Builder
+
+ switch class := op.Class(); {
+ case class.isLoadOrStore():
+ f.WriteString(strings.TrimSuffix(class.String(), "Class"))
+
+ mode := op.Mode()
+ f.WriteString(strings.TrimSuffix(mode.String(), "Mode"))
+
+ switch op.Size() {
+ case DWord:
+ f.WriteString("DW")
+ case Word:
+ f.WriteString("W")
+ case Half:
+ f.WriteString("H")
+ case Byte:
+ f.WriteString("B")
+ }
+
+ case class.IsALU():
+ f.WriteString(op.ALUOp().String())
+
+ if op.ALUOp() == Swap {
+ // Width for Endian is controlled by Constant
+ f.WriteString(op.Endianness().String())
+ } else {
+ if class == ALUClass {
+ f.WriteString("32")
+ }
+
+ f.WriteString(strings.TrimSuffix(op.Source().String(), "Source"))
+ }
+
+ case class.IsJump():
+ f.WriteString(op.JumpOp().String())
+
+ if class == Jump32Class {
+ f.WriteString("32")
+ }
+
+ if jop := op.JumpOp(); jop != Exit && jop != Call {
+ f.WriteString(strings.TrimSuffix(op.Source().String(), "Source"))
+ }
+
+ default:
+ fmt.Fprintf(&f, "OpCode(%#x)", uint8(op))
+ }
+
+ return f.String()
+}
+
+// valid returns true if all bits in value are covered by mask.
+func valid(value, mask OpCode) bool {
+ return value & ^mask == 0
+}
diff --git a/vendor/github.com/cilium/ebpf/asm/opcode_string.go b/vendor/github.com/cilium/ebpf/asm/opcode_string.go
new file mode 100644
index 000000000..58bc3e7e7
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/asm/opcode_string.go
@@ -0,0 +1,30 @@
+// Code generated by "stringer -output opcode_string.go -type=Class"; DO NOT EDIT.
+
+package asm
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[LdClass-0]
+ _ = x[LdXClass-1]
+ _ = x[StClass-2]
+ _ = x[StXClass-3]
+ _ = x[ALUClass-4]
+ _ = x[JumpClass-5]
+ _ = x[Jump32Class-6]
+ _ = x[ALU64Class-7]
+}
+
+const _Class_name = "LdClassLdXClassStClassStXClassALUClassJumpClassJump32ClassALU64Class"
+
+var _Class_index = [...]uint8{0, 7, 15, 22, 30, 38, 47, 58, 68}
+
+func (i Class) String() string {
+ if i >= Class(len(_Class_index)-1) {
+ return "Class(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _Class_name[_Class_index[i]:_Class_index[i+1]]
+}
diff --git a/vendor/github.com/cilium/ebpf/asm/register.go b/vendor/github.com/cilium/ebpf/asm/register.go
new file mode 100644
index 000000000..457a3b8a8
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/asm/register.go
@@ -0,0 +1,51 @@
+package asm
+
+import (
+ "fmt"
+)
+
+// Register is the source or destination of most operations.
+type Register uint8
+
+// R0 contains return values.
+const R0 Register = 0
+
+// Registers for function arguments.
+const (
+ R1 Register = R0 + 1 + iota
+ R2
+ R3
+ R4
+ R5
+)
+
+// Callee saved registers preserved by function calls.
+const (
+ R6 Register = R5 + 1 + iota
+ R7
+ R8
+ R9
+)
+
+// Read-only frame pointer to access stack.
+const (
+ R10 Register = R9 + 1
+ RFP = R10
+)
+
+// Pseudo registers used by 64bit loads and jumps
+const (
+ PseudoMapFD = R1 // BPF_PSEUDO_MAP_FD
+ PseudoMapValue = R2 // BPF_PSEUDO_MAP_VALUE
+ PseudoCall = R1 // BPF_PSEUDO_CALL
+ PseudoFunc = R4 // BPF_PSEUDO_FUNC
+ PseudoKfuncCall = R2 // BPF_PSEUDO_KFUNC_CALL
+)
+
+func (r Register) String() string {
+ v := uint8(r)
+ if v == 10 {
+ return "rfp"
+ }
+ return fmt.Sprintf("r%d", v)
+}
diff --git a/vendor/github.com/cilium/ebpf/attachtype_string.go b/vendor/github.com/cilium/ebpf/attachtype_string.go
new file mode 100644
index 000000000..add2a3b5c
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/attachtype_string.go
@@ -0,0 +1,66 @@
+// Code generated by "stringer -type AttachType -trimprefix Attach"; DO NOT EDIT.
+
+package ebpf
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[AttachNone-0]
+ _ = x[AttachCGroupInetIngress-0]
+ _ = x[AttachCGroupInetEgress-1]
+ _ = x[AttachCGroupInetSockCreate-2]
+ _ = x[AttachCGroupSockOps-3]
+ _ = x[AttachSkSKBStreamParser-4]
+ _ = x[AttachSkSKBStreamVerdict-5]
+ _ = x[AttachCGroupDevice-6]
+ _ = x[AttachSkMsgVerdict-7]
+ _ = x[AttachCGroupInet4Bind-8]
+ _ = x[AttachCGroupInet6Bind-9]
+ _ = x[AttachCGroupInet4Connect-10]
+ _ = x[AttachCGroupInet6Connect-11]
+ _ = x[AttachCGroupInet4PostBind-12]
+ _ = x[AttachCGroupInet6PostBind-13]
+ _ = x[AttachCGroupUDP4Sendmsg-14]
+ _ = x[AttachCGroupUDP6Sendmsg-15]
+ _ = x[AttachLircMode2-16]
+ _ = x[AttachFlowDissector-17]
+ _ = x[AttachCGroupSysctl-18]
+ _ = x[AttachCGroupUDP4Recvmsg-19]
+ _ = x[AttachCGroupUDP6Recvmsg-20]
+ _ = x[AttachCGroupGetsockopt-21]
+ _ = x[AttachCGroupSetsockopt-22]
+ _ = x[AttachTraceRawTp-23]
+ _ = x[AttachTraceFEntry-24]
+ _ = x[AttachTraceFExit-25]
+ _ = x[AttachModifyReturn-26]
+ _ = x[AttachLSMMac-27]
+ _ = x[AttachTraceIter-28]
+ _ = x[AttachCgroupInet4GetPeername-29]
+ _ = x[AttachCgroupInet6GetPeername-30]
+ _ = x[AttachCgroupInet4GetSockname-31]
+ _ = x[AttachCgroupInet6GetSockname-32]
+ _ = x[AttachXDPDevMap-33]
+ _ = x[AttachCgroupInetSockRelease-34]
+ _ = x[AttachXDPCPUMap-35]
+ _ = x[AttachSkLookup-36]
+ _ = x[AttachXDP-37]
+ _ = x[AttachSkSKBVerdict-38]
+ _ = x[AttachSkReuseportSelect-39]
+ _ = x[AttachSkReuseportSelectOrMigrate-40]
+ _ = x[AttachPerfEvent-41]
+ _ = x[AttachTraceKprobeMulti-42]
+}
+
+const _AttachType_name = "NoneCGroupInetEgressCGroupInetSockCreateCGroupSockOpsSkSKBStreamParserSkSKBStreamVerdictCGroupDeviceSkMsgVerdictCGroupInet4BindCGroupInet6BindCGroupInet4ConnectCGroupInet6ConnectCGroupInet4PostBindCGroupInet6PostBindCGroupUDP4SendmsgCGroupUDP6SendmsgLircMode2FlowDissectorCGroupSysctlCGroupUDP4RecvmsgCGroupUDP6RecvmsgCGroupGetsockoptCGroupSetsockoptTraceRawTpTraceFEntryTraceFExitModifyReturnLSMMacTraceIterCgroupInet4GetPeernameCgroupInet6GetPeernameCgroupInet4GetSocknameCgroupInet6GetSocknameXDPDevMapCgroupInetSockReleaseXDPCPUMapSkLookupXDPSkSKBVerdictSkReuseportSelectSkReuseportSelectOrMigratePerfEventTraceKprobeMulti"
+
+var _AttachType_index = [...]uint16{0, 4, 20, 40, 53, 70, 88, 100, 112, 127, 142, 160, 178, 197, 216, 233, 250, 259, 272, 284, 301, 318, 334, 350, 360, 371, 381, 393, 399, 408, 430, 452, 474, 496, 505, 526, 535, 543, 546, 558, 575, 601, 610, 626}
+
+func (i AttachType) String() string {
+ if i >= AttachType(len(_AttachType_index)-1) {
+ return "AttachType(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _AttachType_name[_AttachType_index[i]:_AttachType_index[i+1]]
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/btf.go b/vendor/github.com/cilium/ebpf/btf/btf.go
new file mode 100644
index 000000000..a2ee2d130
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/btf.go
@@ -0,0 +1,701 @@
+package btf
+
+import (
+ "bufio"
+ "debug/elf"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "reflect"
+ "sync"
+
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+)
+
+const btfMagic = 0xeB9F
+
+// Errors returned by BTF functions.
+var (
+ ErrNotSupported = internal.ErrNotSupported
+ ErrNotFound = errors.New("not found")
+ ErrNoExtendedInfo = errors.New("no extended info")
+ ErrMultipleMatches = errors.New("multiple matching types")
+)
+
+// ID represents the unique ID of a BTF object.
+type ID = sys.BTFID
+
+// Spec allows querying a set of Types and loading the set into the
+// kernel.
+type Spec struct {
+ // All types contained by the spec, not including types from the base in
+ // case the spec was parsed from split BTF.
+ types []Type
+
+ // Type IDs indexed by type.
+ typeIDs map[Type]TypeID
+
+ // The ID of the first type in types.
+ firstTypeID TypeID
+
+ // Types indexed by essential name.
+ // Includes all struct flavors and types with the same name.
+ namedTypes map[essentialName][]Type
+
+ // String table from ELF.
+ strings *stringTable
+
+ // Byte order of the ELF we decoded the spec from, may be nil.
+ byteOrder binary.ByteOrder
+}
+
+// LoadSpec opens file and calls LoadSpecFromReader on it.
+func LoadSpec(file string) (*Spec, error) {
+ fh, err := os.Open(file)
+ if err != nil {
+ return nil, err
+ }
+ defer fh.Close()
+
+ return LoadSpecFromReader(fh)
+}
+
+// LoadSpecFromReader reads from an ELF or a raw BTF blob.
+//
+// Returns ErrNotFound if reading from an ELF which contains no BTF. ExtInfos
+// may be nil.
+func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) {
+ file, err := internal.NewSafeELFFile(rd)
+ if err != nil {
+ if bo := guessRawBTFByteOrder(rd); bo != nil {
+ return loadRawSpec(io.NewSectionReader(rd, 0, math.MaxInt64), bo, nil)
+ }
+
+ return nil, err
+ }
+
+ return loadSpecFromELF(file)
+}
+
+// LoadSpecAndExtInfosFromReader reads from an ELF.
+//
+// ExtInfos may be nil if the ELF doesn't contain section metadata.
+// Returns ErrNotFound if the ELF contains no BTF.
+func LoadSpecAndExtInfosFromReader(rd io.ReaderAt) (*Spec, *ExtInfos, error) {
+ file, err := internal.NewSafeELFFile(rd)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ spec, err := loadSpecFromELF(file)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ extInfos, err := loadExtInfosFromELF(file, spec)
+ if err != nil && !errors.Is(err, ErrNotFound) {
+ return nil, nil, err
+ }
+
+ return spec, extInfos, nil
+}
+
+// symbolOffsets extracts all symbols offsets from an ELF and indexes them by
+// section and variable name.
+//
+// References to variables in BTF data sections carry unsigned 32-bit offsets.
+// Some ELF symbols (e.g. in vmlinux) may point to virtual memory that is well
+// beyond this range. Since these symbols cannot be described by BTF info,
+// ignore them here.
+func symbolOffsets(file *internal.SafeELFFile) (map[symbol]uint32, error) {
+ symbols, err := file.Symbols()
+ if err != nil {
+ return nil, fmt.Errorf("can't read symbols: %v", err)
+ }
+
+ offsets := make(map[symbol]uint32)
+ for _, sym := range symbols {
+ if idx := sym.Section; idx >= elf.SHN_LORESERVE && idx <= elf.SHN_HIRESERVE {
+ // Ignore things like SHN_ABS
+ continue
+ }
+
+ if sym.Value > math.MaxUint32 {
+ // VarSecinfo offset is u32, cannot reference symbols in higher regions.
+ continue
+ }
+
+ if int(sym.Section) >= len(file.Sections) {
+ return nil, fmt.Errorf("symbol %s: invalid section %d", sym.Name, sym.Section)
+ }
+
+ secName := file.Sections[sym.Section].Name
+ offsets[symbol{secName, sym.Name}] = uint32(sym.Value)
+ }
+
+ return offsets, nil
+}
+
+func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) {
+ var (
+ btfSection *elf.Section
+ sectionSizes = make(map[string]uint32)
+ )
+
+ for _, sec := range file.Sections {
+ switch sec.Name {
+ case ".BTF":
+ btfSection = sec
+ default:
+ if sec.Type != elf.SHT_PROGBITS && sec.Type != elf.SHT_NOBITS {
+ break
+ }
+
+ if sec.Size > math.MaxUint32 {
+ return nil, fmt.Errorf("section %s exceeds maximum size", sec.Name)
+ }
+
+ sectionSizes[sec.Name] = uint32(sec.Size)
+ }
+ }
+
+ if btfSection == nil {
+ return nil, fmt.Errorf("btf: %w", ErrNotFound)
+ }
+
+ offsets, err := symbolOffsets(file)
+ if err != nil {
+ return nil, err
+ }
+
+ if btfSection.ReaderAt == nil {
+ return nil, fmt.Errorf("compressed BTF is not supported")
+ }
+
+ spec, err := loadRawSpec(btfSection.ReaderAt, file.ByteOrder, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ err = fixupDatasec(spec.types, sectionSizes, offsets)
+ if err != nil {
+ return nil, err
+ }
+
+ return spec, nil
+}
+
+func loadRawSpec(btf io.ReaderAt, bo binary.ByteOrder, base *Spec) (*Spec, error) {
+ var (
+ baseStrings *stringTable
+ firstTypeID TypeID
+ err error
+ )
+
+ if base != nil {
+ if base.firstTypeID != 0 {
+ return nil, fmt.Errorf("can't use split BTF as base")
+ }
+
+ baseStrings = base.strings
+
+ firstTypeID, err = base.nextTypeID()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ rawTypes, rawStrings, err := parseBTF(btf, bo, baseStrings)
+ if err != nil {
+ return nil, err
+ }
+
+ types, err := inflateRawTypes(rawTypes, rawStrings, base)
+ if err != nil {
+ return nil, err
+ }
+
+ typeIDs, typesByName := indexTypes(types, firstTypeID)
+
+ return &Spec{
+ namedTypes: typesByName,
+ typeIDs: typeIDs,
+ types: types,
+ firstTypeID: firstTypeID,
+ strings: rawStrings,
+ byteOrder: bo,
+ }, nil
+}
+
+func indexTypes(types []Type, firstTypeID TypeID) (map[Type]TypeID, map[essentialName][]Type) {
+ namedTypes := 0
+ for _, typ := range types {
+ if typ.TypeName() != "" {
+ // Do a pre-pass to figure out how big types by name has to be.
+ // Most types have unique names, so it's OK to ignore essentialName
+ // here.
+ namedTypes++
+ }
+ }
+
+ typeIDs := make(map[Type]TypeID, len(types))
+ typesByName := make(map[essentialName][]Type, namedTypes)
+
+ for i, typ := range types {
+ if name := newEssentialName(typ.TypeName()); name != "" {
+ typesByName[name] = append(typesByName[name], typ)
+ }
+ typeIDs[typ] = firstTypeID + TypeID(i)
+ }
+
+ return typeIDs, typesByName
+}
+
+// LoadKernelSpec returns the current kernel's BTF information.
+//
+// Defaults to /sys/kernel/btf/vmlinux and falls back to scanning the file system
+// for vmlinux ELFs. Returns an error wrapping ErrNotSupported if BTF is not enabled.
+func LoadKernelSpec() (*Spec, error) {
+ spec, _, err := kernelSpec()
+ if err != nil {
+ return nil, err
+ }
+ return spec.Copy(), nil
+}
+
+var kernelBTF struct {
+ sync.RWMutex
+ spec *Spec
+ // True if the spec was read from an ELF instead of raw BTF in /sys.
+ fallback bool
+}
+
+// FlushKernelSpec removes any cached kernel type information.
+func FlushKernelSpec() {
+ kernelBTF.Lock()
+ defer kernelBTF.Unlock()
+
+ kernelBTF.spec, kernelBTF.fallback = nil, false
+}
+
+func kernelSpec() (*Spec, bool, error) {
+ kernelBTF.RLock()
+ spec, fallback := kernelBTF.spec, kernelBTF.fallback
+ kernelBTF.RUnlock()
+
+ if spec == nil {
+ kernelBTF.Lock()
+ defer kernelBTF.Unlock()
+
+ spec, fallback = kernelBTF.spec, kernelBTF.fallback
+ }
+
+ if spec != nil {
+ return spec, fallback, nil
+ }
+
+ spec, fallback, err := loadKernelSpec()
+ if err != nil {
+ return nil, false, err
+ }
+
+ kernelBTF.spec, kernelBTF.fallback = spec, fallback
+ return spec, fallback, nil
+}
+
+func loadKernelSpec() (_ *Spec, fallback bool, _ error) {
+ fh, err := os.Open("/sys/kernel/btf/vmlinux")
+ if err == nil {
+ defer fh.Close()
+
+ spec, err := loadRawSpec(fh, internal.NativeEndian, nil)
+ return spec, false, err
+ }
+
+ file, err := findVMLinux()
+ if err != nil {
+ return nil, false, err
+ }
+ defer file.Close()
+
+ spec, err := loadSpecFromELF(file)
+ return spec, true, err
+}
+
+// findVMLinux scans multiple well-known paths for vmlinux kernel images.
+func findVMLinux() (*internal.SafeELFFile, error) {
+ release, err := internal.KernelRelease()
+ if err != nil {
+ return nil, err
+ }
+
+ // use same list of locations as libbpf
+ // https://github.com/libbpf/libbpf/blob/9a3a42608dbe3731256a5682a125ac1e23bced8f/src/btf.c#L3114-L3122
+ locations := []string{
+ "/boot/vmlinux-%s",
+ "/lib/modules/%s/vmlinux-%[1]s",
+ "/lib/modules/%s/build/vmlinux",
+ "/usr/lib/modules/%s/kernel/vmlinux",
+ "/usr/lib/debug/boot/vmlinux-%s",
+ "/usr/lib/debug/boot/vmlinux-%s.debug",
+ "/usr/lib/debug/lib/modules/%s/vmlinux",
+ }
+
+ for _, loc := range locations {
+ file, err := internal.OpenSafeELFFile(fmt.Sprintf(loc, release))
+ if errors.Is(err, os.ErrNotExist) {
+ continue
+ }
+ return file, err
+ }
+
+ return nil, fmt.Errorf("no BTF found for kernel version %s: %w", release, internal.ErrNotSupported)
+}
+
+func guessRawBTFByteOrder(r io.ReaderAt) binary.ByteOrder {
+ buf := new(bufio.Reader)
+ for _, bo := range []binary.ByteOrder{
+ binary.LittleEndian,
+ binary.BigEndian,
+ } {
+ buf.Reset(io.NewSectionReader(r, 0, math.MaxInt64))
+ if _, err := parseBTFHeader(buf, bo); err == nil {
+ return bo
+ }
+ }
+
+ return nil
+}
+
+// parseBTF reads a .BTF section into memory and parses it into a list of
+// raw types and a string table.
+func parseBTF(btf io.ReaderAt, bo binary.ByteOrder, baseStrings *stringTable) ([]rawType, *stringTable, error) {
+ buf := internal.NewBufferedSectionReader(btf, 0, math.MaxInt64)
+ header, err := parseBTFHeader(buf, bo)
+ if err != nil {
+ return nil, nil, fmt.Errorf("parsing .BTF header: %v", err)
+ }
+
+ rawStrings, err := readStringTable(io.NewSectionReader(btf, header.stringStart(), int64(header.StringLen)),
+ baseStrings)
+ if err != nil {
+ return nil, nil, fmt.Errorf("can't read type names: %w", err)
+ }
+
+ buf.Reset(io.NewSectionReader(btf, header.typeStart(), int64(header.TypeLen)))
+ rawTypes, err := readTypes(buf, bo, header.TypeLen)
+ if err != nil {
+ return nil, nil, fmt.Errorf("can't read types: %w", err)
+ }
+
+ return rawTypes, rawStrings, nil
+}
+
+type symbol struct {
+ section string
+ name string
+}
+
+// fixupDatasec attempts to patch up missing info in Datasecs and its members by
+// supplementing them with information from the ELF headers and symbol table.
+func fixupDatasec(types []Type, sectionSizes map[string]uint32, offsets map[symbol]uint32) error {
+ for _, typ := range types {
+ ds, ok := typ.(*Datasec)
+ if !ok {
+ continue
+ }
+
+ name := ds.Name
+
+ // Some Datasecs are virtual and don't have corresponding ELF sections.
+ switch name {
+ case ".ksyms":
+ // .ksyms describes forward declarations of kfunc signatures.
+ // Nothing to fix up, all sizes and offsets are 0.
+ for _, vsi := range ds.Vars {
+ _, ok := vsi.Type.(*Func)
+ if !ok {
+ // Only Funcs are supported in the .ksyms Datasec.
+ return fmt.Errorf("data section %s: expected *btf.Func, not %T: %w", name, vsi.Type, ErrNotSupported)
+ }
+ }
+
+ continue
+ case ".kconfig":
+ // .kconfig has a size of 0 and has all members' offsets set to 0.
+ // Fix up all offsets and set the Datasec's size.
+ if err := fixupDatasecLayout(ds); err != nil {
+ return err
+ }
+
+ // Fix up extern to global linkage to avoid a BTF verifier error.
+ for _, vsi := range ds.Vars {
+ vsi.Type.(*Var).Linkage = GlobalVar
+ }
+
+ continue
+ }
+
+ if ds.Size != 0 {
+ continue
+ }
+
+ ds.Size, ok = sectionSizes[name]
+ if !ok {
+ return fmt.Errorf("data section %s: missing size", name)
+ }
+
+ for i := range ds.Vars {
+ symName := ds.Vars[i].Type.TypeName()
+ ds.Vars[i].Offset, ok = offsets[symbol{name, symName}]
+ if !ok {
+ return fmt.Errorf("data section %s: missing offset for symbol %s", name, symName)
+ }
+ }
+ }
+
+ return nil
+}
+
+// fixupDatasecLayout populates ds.Vars[].Offset according to var sizes and
+// alignment. Calculate and set ds.Size.
+func fixupDatasecLayout(ds *Datasec) error {
+ var off uint32
+
+ for i, vsi := range ds.Vars {
+ v, ok := vsi.Type.(*Var)
+ if !ok {
+ return fmt.Errorf("member %d: unsupported type %T", i, vsi.Type)
+ }
+
+ size, err := Sizeof(v.Type)
+ if err != nil {
+ return fmt.Errorf("variable %s: getting size: %w", v.Name, err)
+ }
+ align, err := alignof(v.Type)
+ if err != nil {
+ return fmt.Errorf("variable %s: getting alignment: %w", v.Name, err)
+ }
+
+ // Align the current member based on the offset of the end of the previous
+ // member and the alignment of the current member.
+ off = internal.Align(off, uint32(align))
+
+ ds.Vars[i].Offset = off
+
+ off += uint32(size)
+ }
+
+ ds.Size = off
+
+ return nil
+}
+
+// Copy creates a copy of Spec.
+func (s *Spec) Copy() *Spec {
+ types := copyTypes(s.types, nil)
+ typeIDs, typesByName := indexTypes(types, s.firstTypeID)
+
+ // NB: Other parts of spec are not copied since they are immutable.
+ return &Spec{
+ types,
+ typeIDs,
+ s.firstTypeID,
+ typesByName,
+ s.strings,
+ s.byteOrder,
+ }
+}
+
+type sliceWriter []byte
+
+func (sw sliceWriter) Write(p []byte) (int, error) {
+ if len(p) != len(sw) {
+ return 0, errors.New("size doesn't match")
+ }
+
+ return copy(sw, p), nil
+}
+
+// nextTypeID returns the next unallocated type ID or an error if there are no
+// more type IDs.
+func (s *Spec) nextTypeID() (TypeID, error) {
+ id := s.firstTypeID + TypeID(len(s.types))
+ if id < s.firstTypeID {
+ return 0, fmt.Errorf("no more type IDs")
+ }
+ return id, nil
+}
+
+// TypeByID returns the BTF Type with the given type ID.
+//
+// Returns an error wrapping ErrNotFound if a Type with the given ID
+// does not exist in the Spec.
+func (s *Spec) TypeByID(id TypeID) (Type, error) {
+ if id < s.firstTypeID {
+ return nil, fmt.Errorf("look up type with ID %d (first ID is %d): %w", id, s.firstTypeID, ErrNotFound)
+ }
+
+ index := int(id - s.firstTypeID)
+ if index >= len(s.types) {
+ return nil, fmt.Errorf("look up type with ID %d: %w", id, ErrNotFound)
+ }
+
+ return s.types[index], nil
+}
+
+// TypeID returns the ID for a given Type.
+//
+// Returns an error wrapping ErrNotFound if the type isn't part of the Spec.
+func (s *Spec) TypeID(typ Type) (TypeID, error) {
+ if _, ok := typ.(*Void); ok {
+ // Equality is weird for void, since it is a zero sized type.
+ return 0, nil
+ }
+
+ id, ok := s.typeIDs[typ]
+ if !ok {
+ return 0, fmt.Errorf("no ID for type %s: %w", typ, ErrNotFound)
+ }
+
+ return id, nil
+}
+
+// AnyTypesByName returns a list of BTF Types with the given name.
+//
+// If the BTF blob describes multiple compilation units like vmlinux, multiple
+// Types with the same name and kind can exist, but might not describe the same
+// data structure.
+//
+// Returns an error wrapping ErrNotFound if no matching Type exists in the Spec.
+func (s *Spec) AnyTypesByName(name string) ([]Type, error) {
+ types := s.namedTypes[newEssentialName(name)]
+ if len(types) == 0 {
+ return nil, fmt.Errorf("type name %s: %w", name, ErrNotFound)
+ }
+
+ // Return a copy to prevent changes to namedTypes.
+ result := make([]Type, 0, len(types))
+ for _, t := range types {
+ // Match against the full name, not just the essential one
+ // in case the type being looked up is a struct flavor.
+ if t.TypeName() == name {
+ result = append(result, t)
+ }
+ }
+ return result, nil
+}
+
+// AnyTypeByName returns a Type with the given name.
+//
+// Returns an error if multiple types of that name exist.
+func (s *Spec) AnyTypeByName(name string) (Type, error) {
+ types, err := s.AnyTypesByName(name)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(types) > 1 {
+ return nil, fmt.Errorf("found multiple types: %v", types)
+ }
+
+ return types[0], nil
+}
+
+// TypeByName searches for a Type with a specific name. Since multiple Types
+// with the same name can exist, the parameter typ is taken to narrow down the
+// search in case of a clash.
+//
+// typ must be a non-nil pointer to an implementation of a Type. On success, the
+// address of the found Type will be copied to typ.
+//
+// Returns an error wrapping ErrNotFound if no matching Type exists in the Spec.
+// Returns an error wrapping ErrMultipleMatches if multiple candidates are found.
+func (s *Spec) TypeByName(name string, typ interface{}) error {
+ typeInterface := reflect.TypeOf((*Type)(nil)).Elem()
+
+ // typ may be **T or *Type
+ typValue := reflect.ValueOf(typ)
+ if typValue.Kind() != reflect.Ptr {
+ return fmt.Errorf("%T is not a pointer", typ)
+ }
+
+ typPtr := typValue.Elem()
+ if !typPtr.CanSet() {
+ return fmt.Errorf("%T cannot be set", typ)
+ }
+
+ wanted := typPtr.Type()
+ if wanted == typeInterface {
+ // This is *Type. Unwrap the value's type.
+ wanted = typPtr.Elem().Type()
+ }
+
+ if !wanted.AssignableTo(typeInterface) {
+ return fmt.Errorf("%T does not satisfy Type interface", typ)
+ }
+
+ types, err := s.AnyTypesByName(name)
+ if err != nil {
+ return err
+ }
+
+ var candidate Type
+ for _, typ := range types {
+ if reflect.TypeOf(typ) != wanted {
+ continue
+ }
+
+ if candidate != nil {
+ return fmt.Errorf("type %s(%T): %w", name, typ, ErrMultipleMatches)
+ }
+
+ candidate = typ
+ }
+
+ if candidate == nil {
+ return fmt.Errorf("%s %s: %w", wanted, name, ErrNotFound)
+ }
+
+ typPtr.Set(reflect.ValueOf(candidate))
+
+ return nil
+}
+
+// LoadSplitSpecFromReader loads split BTF from a reader.
+//
+// Types from base are used to resolve references in the split BTF.
+// The returned Spec only contains types from the split BTF, not from the base.
+func LoadSplitSpecFromReader(r io.ReaderAt, base *Spec) (*Spec, error) {
+ return loadRawSpec(r, internal.NativeEndian, base)
+}
+
+// TypesIterator iterates over types of a given spec.
+type TypesIterator struct {
+ types []Type
+ index int
+ // The last visited type in the spec.
+ Type Type
+}
+
+// Iterate returns the types iterator.
+func (s *Spec) Iterate() *TypesIterator {
+ // We share the backing array of types with the Spec. This is safe since
+ // we don't allow deletion or shuffling of types.
+ return &TypesIterator{types: s.types, index: 0}
+}
+
+// Next returns true as long as there are any remaining types.
+func (iter *TypesIterator) Next() bool {
+ if len(iter.types) <= iter.index {
+ return false
+ }
+
+ iter.Type = iter.types[iter.index]
+ iter.index++
+ return true
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/btf_types.go b/vendor/github.com/cilium/ebpf/btf/btf_types.go
new file mode 100644
index 000000000..c9984b2d4
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/btf_types.go
@@ -0,0 +1,431 @@
+package btf
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "unsafe"
+
+ "github.com/cilium/ebpf/internal"
+)
+
+//go:generate go run golang.org/x/tools/cmd/stringer@latest -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage,btfKind
+
+// btfKind describes a Type.
+type btfKind uint8
+
+// Equivalents of the BTF_KIND_* constants.
+const (
+ kindUnknown btfKind = iota // Unknown
+ kindInt // Int
+ kindPointer // Pointer
+ kindArray // Array
+ kindStruct // Struct
+ kindUnion // Union
+ kindEnum // Enum
+ kindForward // Forward
+ kindTypedef // Typedef
+ kindVolatile // Volatile
+ kindConst // Const
+ kindRestrict // Restrict
+ // Added ~4.20
+ kindFunc // Func
+ kindFuncProto // FuncProto
+ // Added ~5.1
+ kindVar // Var
+ kindDatasec // Datasec
+ // Added ~5.13
+ kindFloat // Float
+ // Added 5.16
+ kindDeclTag // DeclTag
+ kindTypeTag // TypeTag
+ // Added 6.0
+ kindEnum64 // Enum64
+)
+
+// FuncLinkage describes BTF function linkage metadata.
+type FuncLinkage int
+
+// Equivalent of enum btf_func_linkage.
+const (
+ StaticFunc FuncLinkage = iota // static
+ GlobalFunc // global
+ ExternFunc // extern
+)
+
+// VarLinkage describes BTF variable linkage metadata.
+type VarLinkage int
+
+const (
+ StaticVar VarLinkage = iota // static
+ GlobalVar // global
+ ExternVar // extern
+)
+
+const (
+ btfTypeKindShift = 24
+ btfTypeKindLen = 5
+ btfTypeVlenShift = 0
+ btfTypeVlenMask = 16
+ btfTypeKindFlagShift = 31
+ btfTypeKindFlagMask = 1
+)
+
+var btfHeaderLen = binary.Size(&btfHeader{})
+
+type btfHeader struct {
+ Magic uint16
+ Version uint8
+ Flags uint8
+ HdrLen uint32
+
+ TypeOff uint32
+ TypeLen uint32
+ StringOff uint32
+ StringLen uint32
+}
+
+// typeStart returns the offset from the beginning of the .BTF section
+// to the start of its type entries.
+func (h *btfHeader) typeStart() int64 {
+ return int64(h.HdrLen + h.TypeOff)
+}
+
+// stringStart returns the offset from the beginning of the .BTF section
+// to the start of its string table.
+func (h *btfHeader) stringStart() int64 {
+ return int64(h.HdrLen + h.StringOff)
+}
+
+// parseBTFHeader parses the header of the .BTF section.
+func parseBTFHeader(r io.Reader, bo binary.ByteOrder) (*btfHeader, error) {
+ var header btfHeader
+ if err := binary.Read(r, bo, &header); err != nil {
+ return nil, fmt.Errorf("can't read header: %v", err)
+ }
+
+ if header.Magic != btfMagic {
+ return nil, fmt.Errorf("incorrect magic value %v", header.Magic)
+ }
+
+ if header.Version != 1 {
+ return nil, fmt.Errorf("unexpected version %v", header.Version)
+ }
+
+ if header.Flags != 0 {
+ return nil, fmt.Errorf("unsupported flags %v", header.Flags)
+ }
+
+ remainder := int64(header.HdrLen) - int64(binary.Size(&header))
+ if remainder < 0 {
+ return nil, errors.New("header length shorter than btfHeader size")
+ }
+
+ if _, err := io.CopyN(internal.DiscardZeroes{}, r, remainder); err != nil {
+ return nil, fmt.Errorf("header padding: %v", err)
+ }
+
+ return &header, nil
+}
+
+var btfTypeLen = binary.Size(btfType{})
+
+// btfType is equivalent to struct btf_type in Documentation/bpf/btf.rst.
+type btfType struct {
+ NameOff uint32
+ /* "info" bits arrangement
+ * bits 0-15: vlen (e.g. # of struct's members), linkage
+ * bits 16-23: unused
+ * bits 24-28: kind (e.g. int, ptr, array...etc)
+ * bits 29-30: unused
+ * bit 31: kind_flag, currently used by
+ * struct, union and fwd
+ */
+ Info uint32
+ /* "size" is used by INT, ENUM, STRUCT and UNION.
+ * "size" tells the size of the type it is describing.
+ *
+ * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
+ * FUNC and FUNC_PROTO.
+ * "type" is a type_id referring to another type.
+ */
+ SizeType uint32
+}
+
+func mask(len uint32) uint32 {
+ return (1 << len) - 1
+}
+
+func readBits(value, len, shift uint32) uint32 {
+ return (value >> shift) & mask(len)
+}
+
+func writeBits(value, len, shift, new uint32) uint32 {
+ value &^= mask(len) << shift
+ value |= (new & mask(len)) << shift
+ return value
+}
+
+func (bt *btfType) info(len, shift uint32) uint32 {
+ return readBits(bt.Info, len, shift)
+}
+
+func (bt *btfType) setInfo(value, len, shift uint32) {
+ bt.Info = writeBits(bt.Info, len, shift, value)
+}
+
+func (bt *btfType) Kind() btfKind {
+ return btfKind(bt.info(btfTypeKindLen, btfTypeKindShift))
+}
+
+func (bt *btfType) SetKind(kind btfKind) {
+ bt.setInfo(uint32(kind), btfTypeKindLen, btfTypeKindShift)
+}
+
+func (bt *btfType) Vlen() int {
+ return int(bt.info(btfTypeVlenMask, btfTypeVlenShift))
+}
+
+func (bt *btfType) SetVlen(vlen int) {
+ bt.setInfo(uint32(vlen), btfTypeVlenMask, btfTypeVlenShift)
+}
+
+func (bt *btfType) kindFlagBool() bool {
+ return bt.info(btfTypeKindFlagMask, btfTypeKindFlagShift) == 1
+}
+
+func (bt *btfType) setKindFlagBool(set bool) {
+ var value uint32
+ if set {
+ value = 1
+ }
+ bt.setInfo(value, btfTypeKindFlagMask, btfTypeKindFlagShift)
+}
+
+// Bitfield returns true if the struct or union contain a bitfield.
+func (bt *btfType) Bitfield() bool {
+ return bt.kindFlagBool()
+}
+
+func (bt *btfType) SetBitfield(isBitfield bool) {
+ bt.setKindFlagBool(isBitfield)
+}
+
+func (bt *btfType) FwdKind() FwdKind {
+ return FwdKind(bt.info(btfTypeKindFlagMask, btfTypeKindFlagShift))
+}
+
+func (bt *btfType) SetFwdKind(kind FwdKind) {
+ bt.setInfo(uint32(kind), btfTypeKindFlagMask, btfTypeKindFlagShift)
+}
+
+func (bt *btfType) Signed() bool {
+ return bt.kindFlagBool()
+}
+
+func (bt *btfType) SetSigned(signed bool) {
+ bt.setKindFlagBool(signed)
+}
+
+func (bt *btfType) Linkage() FuncLinkage {
+ return FuncLinkage(bt.info(btfTypeVlenMask, btfTypeVlenShift))
+}
+
+func (bt *btfType) SetLinkage(linkage FuncLinkage) {
+ bt.setInfo(uint32(linkage), btfTypeVlenMask, btfTypeVlenShift)
+}
+
+func (bt *btfType) Type() TypeID {
+ // TODO: Panic here if wrong kind?
+ return TypeID(bt.SizeType)
+}
+
+func (bt *btfType) SetType(id TypeID) {
+ bt.SizeType = uint32(id)
+}
+
+func (bt *btfType) Size() uint32 {
+ // TODO: Panic here if wrong kind?
+ return bt.SizeType
+}
+
+func (bt *btfType) SetSize(size uint32) {
+ bt.SizeType = size
+}
+
+func (bt *btfType) Marshal(w io.Writer, bo binary.ByteOrder) error {
+ buf := make([]byte, unsafe.Sizeof(*bt))
+ bo.PutUint32(buf[0:], bt.NameOff)
+ bo.PutUint32(buf[4:], bt.Info)
+ bo.PutUint32(buf[8:], bt.SizeType)
+ _, err := w.Write(buf)
+ return err
+}
+
+type rawType struct {
+ btfType
+ data interface{}
+}
+
+func (rt *rawType) Marshal(w io.Writer, bo binary.ByteOrder) error {
+ if err := rt.btfType.Marshal(w, bo); err != nil {
+ return err
+ }
+
+ if rt.data == nil {
+ return nil
+ }
+
+ return binary.Write(w, bo, rt.data)
+}
+
+// btfInt encodes additional data for integers.
+//
+// ? ? ? ? e e e e o o o o o o o o ? ? ? ? ? ? ? ? b b b b b b b b
+// ? = undefined
+// e = encoding
+// o = offset (bitfields?)
+// b = bits (bitfields)
+type btfInt struct {
+ Raw uint32
+}
+
+const (
+ btfIntEncodingLen = 4
+ btfIntEncodingShift = 24
+ btfIntOffsetLen = 8
+ btfIntOffsetShift = 16
+ btfIntBitsLen = 8
+ btfIntBitsShift = 0
+)
+
+func (bi btfInt) Encoding() IntEncoding {
+ return IntEncoding(readBits(bi.Raw, btfIntEncodingLen, btfIntEncodingShift))
+}
+
+func (bi *btfInt) SetEncoding(e IntEncoding) {
+ bi.Raw = writeBits(uint32(bi.Raw), btfIntEncodingLen, btfIntEncodingShift, uint32(e))
+}
+
+func (bi btfInt) Offset() Bits {
+ return Bits(readBits(bi.Raw, btfIntOffsetLen, btfIntOffsetShift))
+}
+
+func (bi *btfInt) SetOffset(offset uint32) {
+ bi.Raw = writeBits(bi.Raw, btfIntOffsetLen, btfIntOffsetShift, offset)
+}
+
+func (bi btfInt) Bits() Bits {
+ return Bits(readBits(bi.Raw, btfIntBitsLen, btfIntBitsShift))
+}
+
+func (bi *btfInt) SetBits(bits byte) {
+ bi.Raw = writeBits(bi.Raw, btfIntBitsLen, btfIntBitsShift, uint32(bits))
+}
+
+type btfArray struct {
+ Type TypeID
+ IndexType TypeID
+ Nelems uint32
+}
+
+type btfMember struct {
+ NameOff uint32
+ Type TypeID
+ Offset uint32
+}
+
+type btfVarSecinfo struct {
+ Type TypeID
+ Offset uint32
+ Size uint32
+}
+
+type btfVariable struct {
+ Linkage uint32
+}
+
+type btfEnum struct {
+ NameOff uint32
+ Val uint32
+}
+
+type btfEnum64 struct {
+ NameOff uint32
+ ValLo32 uint32
+ ValHi32 uint32
+}
+
+type btfParam struct {
+ NameOff uint32
+ Type TypeID
+}
+
+type btfDeclTag struct {
+ ComponentIdx uint32
+}
+
+func readTypes(r io.Reader, bo binary.ByteOrder, typeLen uint32) ([]rawType, error) {
+ var header btfType
+ // because of the interleaving between types and struct members it is difficult to
+ // precompute the number of raw types this will parse
+ // this "guess" is a good first estimation
+ sizeOfbtfType := uintptr(btfTypeLen)
+ tyMaxCount := uintptr(typeLen) / sizeOfbtfType / 2
+ types := make([]rawType, 0, tyMaxCount)
+
+ for id := TypeID(1); ; id++ {
+ if err := binary.Read(r, bo, &header); err == io.EOF {
+ return types, nil
+ } else if err != nil {
+ return nil, fmt.Errorf("can't read type info for id %v: %v", id, err)
+ }
+
+ var data interface{}
+ switch header.Kind() {
+ case kindInt:
+ data = new(btfInt)
+ case kindPointer:
+ case kindArray:
+ data = new(btfArray)
+ case kindStruct:
+ fallthrough
+ case kindUnion:
+ data = make([]btfMember, header.Vlen())
+ case kindEnum:
+ data = make([]btfEnum, header.Vlen())
+ case kindForward:
+ case kindTypedef:
+ case kindVolatile:
+ case kindConst:
+ case kindRestrict:
+ case kindFunc:
+ case kindFuncProto:
+ data = make([]btfParam, header.Vlen())
+ case kindVar:
+ data = new(btfVariable)
+ case kindDatasec:
+ data = make([]btfVarSecinfo, header.Vlen())
+ case kindFloat:
+ case kindDeclTag:
+ data = new(btfDeclTag)
+ case kindTypeTag:
+ case kindEnum64:
+ data = make([]btfEnum64, header.Vlen())
+ default:
+ return nil, fmt.Errorf("type id %v: unknown kind: %v", id, header.Kind())
+ }
+
+ if data == nil {
+ types = append(types, rawType{header, nil})
+ continue
+ }
+
+ if err := binary.Read(r, bo, data); err != nil {
+ return nil, fmt.Errorf("type id %d: kind %v: can't read %T: %v", id, header.Kind(), data, err)
+ }
+
+ types = append(types, rawType{header, data})
+ }
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/btf_types_string.go b/vendor/github.com/cilium/ebpf/btf/btf_types_string.go
new file mode 100644
index 000000000..b7a1b80d1
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/btf_types_string.go
@@ -0,0 +1,80 @@
+// Code generated by "stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage,btfKind"; DO NOT EDIT.
+
+package btf
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[StaticFunc-0]
+ _ = x[GlobalFunc-1]
+ _ = x[ExternFunc-2]
+}
+
+const _FuncLinkage_name = "staticglobalextern"
+
+var _FuncLinkage_index = [...]uint8{0, 6, 12, 18}
+
+func (i FuncLinkage) String() string {
+ if i < 0 || i >= FuncLinkage(len(_FuncLinkage_index)-1) {
+ return "FuncLinkage(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _FuncLinkage_name[_FuncLinkage_index[i]:_FuncLinkage_index[i+1]]
+}
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[StaticVar-0]
+ _ = x[GlobalVar-1]
+ _ = x[ExternVar-2]
+}
+
+const _VarLinkage_name = "staticglobalextern"
+
+var _VarLinkage_index = [...]uint8{0, 6, 12, 18}
+
+func (i VarLinkage) String() string {
+ if i < 0 || i >= VarLinkage(len(_VarLinkage_index)-1) {
+ return "VarLinkage(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _VarLinkage_name[_VarLinkage_index[i]:_VarLinkage_index[i+1]]
+}
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[kindUnknown-0]
+ _ = x[kindInt-1]
+ _ = x[kindPointer-2]
+ _ = x[kindArray-3]
+ _ = x[kindStruct-4]
+ _ = x[kindUnion-5]
+ _ = x[kindEnum-6]
+ _ = x[kindForward-7]
+ _ = x[kindTypedef-8]
+ _ = x[kindVolatile-9]
+ _ = x[kindConst-10]
+ _ = x[kindRestrict-11]
+ _ = x[kindFunc-12]
+ _ = x[kindFuncProto-13]
+ _ = x[kindVar-14]
+ _ = x[kindDatasec-15]
+ _ = x[kindFloat-16]
+ _ = x[kindDeclTag-17]
+ _ = x[kindTypeTag-18]
+ _ = x[kindEnum64-19]
+}
+
+const _btfKind_name = "UnknownIntPointerArrayStructUnionEnumForwardTypedefVolatileConstRestrictFuncFuncProtoVarDatasecFloatDeclTagTypeTagEnum64"
+
+var _btfKind_index = [...]uint8{0, 7, 10, 17, 22, 28, 33, 37, 44, 51, 59, 64, 72, 76, 85, 88, 95, 100, 107, 114, 120}
+
+func (i btfKind) String() string {
+ if i >= btfKind(len(_btfKind_index)-1) {
+ return "btfKind(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _btfKind_name[_btfKind_index[i]:_btfKind_index[i+1]]
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/core.go b/vendor/github.com/cilium/ebpf/btf/core.go
new file mode 100644
index 000000000..a5c40d36a
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/core.go
@@ -0,0 +1,1011 @@
+package btf
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "strconv"
+ "strings"
+
+ "github.com/cilium/ebpf/asm"
+)
+
+// Code in this file is derived from libbpf, which is available under a BSD
+// 2-Clause license.
+
+// COREFixup is the result of computing a CO-RE relocation for a target.
+type COREFixup struct {
+ kind coreKind
+ local uint32
+ target uint32
+ // True if there is no valid fixup. The instruction is replaced with an
+ // invalid dummy.
+ poison bool
+ // True if the validation of the local value should be skipped. Used by
+ // some kinds of bitfield relocations.
+ skipLocalValidation bool
+}
+
+func (f *COREFixup) equal(other COREFixup) bool {
+ return f.local == other.local && f.target == other.target
+}
+
+func (f *COREFixup) String() string {
+ if f.poison {
+ return fmt.Sprintf("%s=poison", f.kind)
+ }
+ return fmt.Sprintf("%s=%d->%d", f.kind, f.local, f.target)
+}
+
+func (f *COREFixup) Apply(ins *asm.Instruction) error {
+ if f.poison {
+ const badRelo = 0xbad2310
+
+ *ins = asm.BuiltinFunc(badRelo).Call()
+ return nil
+ }
+
+ switch class := ins.OpCode.Class(); class {
+ case asm.LdXClass, asm.StClass, asm.StXClass:
+ if want := int16(f.local); !f.skipLocalValidation && want != ins.Offset {
+ return fmt.Errorf("invalid offset %d, expected %d", ins.Offset, f.local)
+ }
+
+ if f.target > math.MaxInt16 {
+ return fmt.Errorf("offset %d exceeds MaxInt16", f.target)
+ }
+
+ ins.Offset = int16(f.target)
+
+ case asm.LdClass:
+ if !ins.IsConstantLoad(asm.DWord) {
+ return fmt.Errorf("not a dword-sized immediate load")
+ }
+
+ if want := int64(f.local); !f.skipLocalValidation && want != ins.Constant {
+ return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v)", ins.Constant, want, f)
+ }
+
+ ins.Constant = int64(f.target)
+
+ case asm.ALUClass:
+ if ins.OpCode.ALUOp() == asm.Swap {
+ return fmt.Errorf("relocation against swap")
+ }
+
+ fallthrough
+
+ case asm.ALU64Class:
+ if src := ins.OpCode.Source(); src != asm.ImmSource {
+ return fmt.Errorf("invalid source %s", src)
+ }
+
+ if want := int64(f.local); !f.skipLocalValidation && want != ins.Constant {
+ return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v, kind: %v, ins: %v)", ins.Constant, want, f, f.kind, ins)
+ }
+
+ if f.target > math.MaxInt32 {
+ return fmt.Errorf("immediate %d exceeds MaxInt32", f.target)
+ }
+
+ ins.Constant = int64(f.target)
+
+ default:
+ return fmt.Errorf("invalid class %s", class)
+ }
+
+ return nil
+}
+
+func (f COREFixup) isNonExistant() bool {
+ return f.kind.checksForExistence() && f.target == 0
+}
+
+// coreKind is the type of CO-RE relocation as specified in BPF source code.
+type coreKind uint32
+
+const (
+ reloFieldByteOffset coreKind = iota /* field byte offset */
+ reloFieldByteSize /* field size in bytes */
+ reloFieldExists /* field existence in target kernel */
+ reloFieldSigned /* field signedness (0 - unsigned, 1 - signed) */
+ reloFieldLShiftU64 /* bitfield-specific left bitshift */
+ reloFieldRShiftU64 /* bitfield-specific right bitshift */
+ reloTypeIDLocal /* type ID in local BPF object */
+ reloTypeIDTarget /* type ID in target kernel */
+ reloTypeExists /* type existence in target kernel */
+ reloTypeSize /* type size in bytes */
+ reloEnumvalExists /* enum value existence in target kernel */
+ reloEnumvalValue /* enum value integer value */
+)
+
+func (k coreKind) checksForExistence() bool {
+ return k == reloEnumvalExists || k == reloTypeExists || k == reloFieldExists
+}
+
+func (k coreKind) String() string {
+ switch k {
+ case reloFieldByteOffset:
+ return "byte_off"
+ case reloFieldByteSize:
+ return "byte_sz"
+ case reloFieldExists:
+ return "field_exists"
+ case reloFieldSigned:
+ return "signed"
+ case reloFieldLShiftU64:
+ return "lshift_u64"
+ case reloFieldRShiftU64:
+ return "rshift_u64"
+ case reloTypeIDLocal:
+ return "local_type_id"
+ case reloTypeIDTarget:
+ return "target_type_id"
+ case reloTypeExists:
+ return "type_exists"
+ case reloTypeSize:
+ return "type_size"
+ case reloEnumvalExists:
+ return "enumval_exists"
+ case reloEnumvalValue:
+ return "enumval_value"
+ default:
+ return "unknown"
+ }
+}
+
+// CORERelocate calculates changes needed to adjust eBPF instructions for differences
+// in types.
+//
+// Returns a list of fixups which can be applied to instructions to make them
+// match the target type(s).
+//
+// Fixups are returned in the order of relos, e.g. fixup[i] is the solution
+// for relos[i].
+func CORERelocate(relos []*CORERelocation, target *Spec, bo binary.ByteOrder) ([]COREFixup, error) {
+ if target == nil {
+ var err error
+ target, _, err = kernelSpec()
+ if err != nil {
+ return nil, fmt.Errorf("load kernel spec: %w", err)
+ }
+ }
+
+ if bo != target.byteOrder {
+ return nil, fmt.Errorf("can't relocate %s against %s", bo, target.byteOrder)
+ }
+
+ type reloGroup struct {
+ relos []*CORERelocation
+ // Position of each relocation in relos.
+ indices []int
+ }
+
+ // Split relocations into per Type lists.
+ relosByType := make(map[Type]*reloGroup)
+ result := make([]COREFixup, len(relos))
+ for i, relo := range relos {
+ if relo.kind == reloTypeIDLocal {
+ // Filtering out reloTypeIDLocal here makes our lives a lot easier
+ // down the line, since it doesn't have a target at all.
+ if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
+ return nil, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor)
+ }
+
+ result[i] = COREFixup{
+ kind: relo.kind,
+ local: uint32(relo.id),
+ // NB: Using relo.id as the target here is incorrect, since
+ // it doesn't match the BTF we generate on the fly. This isn't
+ // too bad for now since there are no uses of the local type ID
+ // in the kernel, yet.
+ target: uint32(relo.id),
+ }
+ continue
+ }
+
+ group, ok := relosByType[relo.typ]
+ if !ok {
+ group = &reloGroup{}
+ relosByType[relo.typ] = group
+ }
+ group.relos = append(group.relos, relo)
+ group.indices = append(group.indices, i)
+ }
+
+ for localType, group := range relosByType {
+ localTypeName := localType.TypeName()
+ if localTypeName == "" {
+ return nil, fmt.Errorf("relocate unnamed or anonymous type %s: %w", localType, ErrNotSupported)
+ }
+
+ targets := target.namedTypes[newEssentialName(localTypeName)]
+ fixups, err := coreCalculateFixups(group.relos, target, targets, bo)
+ if err != nil {
+ return nil, fmt.Errorf("relocate %s: %w", localType, err)
+ }
+
+ for j, index := range group.indices {
+ result[index] = fixups[j]
+ }
+ }
+
+ return result, nil
+}
+
+var errAmbiguousRelocation = errors.New("ambiguous relocation")
+var errImpossibleRelocation = errors.New("impossible relocation")
+var errIncompatibleTypes = errors.New("incompatible types")
+
+// coreCalculateFixups finds the target type that best matches all relocations.
+//
+// All relos must target the same type.
+//
+// The best target is determined by scoring: the less poisoning we have to do
+// the better the target is.
+func coreCalculateFixups(relos []*CORERelocation, targetSpec *Spec, targets []Type, bo binary.ByteOrder) ([]COREFixup, error) {
+ bestScore := len(relos)
+ var bestFixups []COREFixup
+ for _, target := range targets {
+ targetID, err := targetSpec.TypeID(target)
+ if err != nil {
+ return nil, fmt.Errorf("target type ID: %w", err)
+ }
+
+ score := 0 // lower is better
+ fixups := make([]COREFixup, 0, len(relos))
+ for _, relo := range relos {
+ fixup, err := coreCalculateFixup(relo, target, targetID, bo)
+ if err != nil {
+ return nil, fmt.Errorf("target %s: %s: %w", target, relo.kind, err)
+ }
+ if fixup.poison || fixup.isNonExistant() {
+ score++
+ }
+ fixups = append(fixups, fixup)
+ }
+
+ if score > bestScore {
+ // We have a better target already, ignore this one.
+ continue
+ }
+
+ if score < bestScore {
+ // This is the best target yet, use it.
+ bestScore = score
+ bestFixups = fixups
+ continue
+ }
+
+ // Some other target has the same score as the current one. Make sure
+ // the fixups agree with each other.
+ for i, fixup := range bestFixups {
+ if !fixup.equal(fixups[i]) {
+ return nil, fmt.Errorf("%s: multiple types match: %w", fixup.kind, errAmbiguousRelocation)
+ }
+ }
+ }
+
+ if bestFixups == nil {
+ // Nothing at all matched, probably because there are no suitable
+ // targets at all.
+ //
+ // Poison everything except checksForExistence.
+ bestFixups = make([]COREFixup, len(relos))
+ for i, relo := range relos {
+ if relo.kind.checksForExistence() {
+ bestFixups[i] = COREFixup{kind: relo.kind, local: 1, target: 0}
+ } else {
+ bestFixups[i] = COREFixup{kind: relo.kind, poison: true}
+ }
+ }
+ }
+
+ return bestFixups, nil
+}
+
+var errNoSignedness = errors.New("no signedness")
+
+// coreCalculateFixup calculates the fixup for a single local type, target type
+// and relocation.
+func coreCalculateFixup(relo *CORERelocation, target Type, targetID TypeID, bo binary.ByteOrder) (COREFixup, error) {
+ fixup := func(local, target uint32) (COREFixup, error) {
+ return COREFixup{kind: relo.kind, local: local, target: target}, nil
+ }
+ fixupWithoutValidation := func(local, target uint32) (COREFixup, error) {
+ return COREFixup{kind: relo.kind, local: local, target: target, skipLocalValidation: true}, nil
+ }
+ poison := func() (COREFixup, error) {
+ if relo.kind.checksForExistence() {
+ return fixup(1, 0)
+ }
+ return COREFixup{kind: relo.kind, poison: true}, nil
+ }
+ zero := COREFixup{}
+
+ local := relo.typ
+
+ switch relo.kind {
+ case reloTypeIDTarget, reloTypeSize, reloTypeExists:
+ if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
+ return zero, fmt.Errorf("unexpected accessor %v", relo.accessor)
+ }
+
+ err := coreAreTypesCompatible(local, target)
+ if errors.Is(err, errIncompatibleTypes) {
+ return poison()
+ }
+ if err != nil {
+ return zero, err
+ }
+
+ switch relo.kind {
+ case reloTypeExists:
+ return fixup(1, 1)
+
+ case reloTypeIDTarget:
+ return fixup(uint32(relo.id), uint32(targetID))
+
+ case reloTypeSize:
+ localSize, err := Sizeof(local)
+ if err != nil {
+ return zero, err
+ }
+
+ targetSize, err := Sizeof(target)
+ if err != nil {
+ return zero, err
+ }
+
+ return fixup(uint32(localSize), uint32(targetSize))
+ }
+
+ case reloEnumvalValue, reloEnumvalExists:
+ localValue, targetValue, err := coreFindEnumValue(local, relo.accessor, target)
+ if errors.Is(err, errImpossibleRelocation) {
+ return poison()
+ }
+ if err != nil {
+ return zero, err
+ }
+
+ switch relo.kind {
+ case reloEnumvalExists:
+ return fixup(1, 1)
+
+ case reloEnumvalValue:
+ return fixup(uint32(localValue.Value), uint32(targetValue.Value))
+ }
+
+ case reloFieldByteOffset, reloFieldByteSize, reloFieldExists, reloFieldLShiftU64, reloFieldRShiftU64, reloFieldSigned:
+ if _, ok := as[*Fwd](target); ok {
+ // We can't relocate fields using a forward declaration, so
+ // skip it. If a non-forward declaration is present in the BTF
+ // we'll find it in one of the other iterations.
+ return poison()
+ }
+
+ localField, targetField, err := coreFindField(local, relo.accessor, target)
+ if errors.Is(err, errImpossibleRelocation) {
+ return poison()
+ }
+ if err != nil {
+ return zero, err
+ }
+
+ maybeSkipValidation := func(f COREFixup, err error) (COREFixup, error) {
+ f.skipLocalValidation = localField.bitfieldSize > 0
+ return f, err
+ }
+
+ switch relo.kind {
+ case reloFieldExists:
+ return fixup(1, 1)
+
+ case reloFieldByteOffset:
+ return maybeSkipValidation(fixup(localField.offset, targetField.offset))
+
+ case reloFieldByteSize:
+ localSize, err := Sizeof(localField.Type)
+ if err != nil {
+ return zero, err
+ }
+
+ targetSize, err := Sizeof(targetField.Type)
+ if err != nil {
+ return zero, err
+ }
+ return maybeSkipValidation(fixup(uint32(localSize), uint32(targetSize)))
+
+ case reloFieldLShiftU64:
+ var target uint32
+ if bo == binary.LittleEndian {
+ targetSize, err := targetField.sizeBits()
+ if err != nil {
+ return zero, err
+ }
+
+ target = uint32(64 - targetField.bitfieldOffset - targetSize)
+ } else {
+ loadWidth, err := Sizeof(targetField.Type)
+ if err != nil {
+ return zero, err
+ }
+
+ target = uint32(64 - Bits(loadWidth*8) + targetField.bitfieldOffset)
+ }
+ return fixupWithoutValidation(0, target)
+
+ case reloFieldRShiftU64:
+ targetSize, err := targetField.sizeBits()
+ if err != nil {
+ return zero, err
+ }
+
+ return fixupWithoutValidation(0, uint32(64-targetSize))
+
+ case reloFieldSigned:
+ switch local := UnderlyingType(localField.Type).(type) {
+ case *Enum:
+ target, ok := as[*Enum](targetField.Type)
+ if !ok {
+ return zero, fmt.Errorf("target isn't *Enum but %T", targetField.Type)
+ }
+
+ return fixup(boolToUint32(local.Signed), boolToUint32(target.Signed))
+ case *Int:
+ target, ok := as[*Int](targetField.Type)
+ if !ok {
+ return zero, fmt.Errorf("target isn't *Int but %T", targetField.Type)
+ }
+
+ return fixup(
+ uint32(local.Encoding&Signed),
+ uint32(target.Encoding&Signed),
+ )
+ default:
+ return zero, fmt.Errorf("type %T: %w", local, errNoSignedness)
+ }
+ }
+ }
+
+ return zero, ErrNotSupported
+}
+
+func boolToUint32(val bool) uint32 {
+ if val {
+ return 1
+ }
+ return 0
+}
+
+/* coreAccessor contains a path through a struct. It contains at least one index.
+ *
+ * The interpretation depends on the kind of the relocation. The following is
+ * taken from struct bpf_core_relo in libbpf_internal.h:
+ *
+ * - for field-based relocations, string encodes an accessed field using
+ * a sequence of field and array indices, separated by colon (:). It's
+ * conceptually very close to LLVM's getelementptr ([0]) instruction's
+ * arguments for identifying offset to a field.
+ * - for type-based relocations, string is expected to be just "0";
+ * - for enum value-based relocations, string contains an index of enum
+ * value within its enum type;
+ *
+ * Example to provide a better feel.
+ *
+ * struct sample {
+ * int a;
+ * struct {
+ * int b[10];
+ * };
+ * };
+ *
+ * struct sample s = ...;
+ * int x = &s->a; // encoded as "0:0" (a is field #0)
+ * int y = &s->b[5]; // encoded as "0:1:0:5" (anon struct is field #1,
+ * // b is field #0 inside anon struct, accessing elem #5)
+ * int z = &s[10]->b; // encoded as "10:1" (ptr is used as an array)
+ */
+type coreAccessor []int
+
+func parseCOREAccessor(accessor string) (coreAccessor, error) {
+ if accessor == "" {
+ return nil, fmt.Errorf("empty accessor")
+ }
+
+ parts := strings.Split(accessor, ":")
+ result := make(coreAccessor, 0, len(parts))
+ for _, part := range parts {
+ // 31 bits to avoid overflowing int on 32 bit platforms.
+ index, err := strconv.ParseUint(part, 10, 31)
+ if err != nil {
+ return nil, fmt.Errorf("accessor index %q: %s", part, err)
+ }
+
+ result = append(result, int(index))
+ }
+
+ return result, nil
+}
+
+func (ca coreAccessor) String() string {
+ strs := make([]string, 0, len(ca))
+ for _, i := range ca {
+ strs = append(strs, strconv.Itoa(i))
+ }
+ return strings.Join(strs, ":")
+}
+
+func (ca coreAccessor) enumValue(t Type) (*EnumValue, error) {
+ e, ok := as[*Enum](t)
+ if !ok {
+ return nil, fmt.Errorf("not an enum: %s", t)
+ }
+
+ if len(ca) > 1 {
+ return nil, fmt.Errorf("invalid accessor %s for enum", ca)
+ }
+
+ i := ca[0]
+ if i >= len(e.Values) {
+ return nil, fmt.Errorf("invalid index %d for %s", i, e)
+ }
+
+ return &e.Values[i], nil
+}
+
+// coreField represents the position of a "child" of a composite type from the
+// start of that type.
+//
+// /- start of composite
+// | offset * 8 | bitfieldOffset | bitfieldSize | ... |
+// \- start of field end of field -/
+type coreField struct {
+ Type Type
+
+ // The position of the field from the start of the composite type in bytes.
+ offset uint32
+
+ // The offset of the bitfield in bits from the start of the field.
+ bitfieldOffset Bits
+
+ // The size of the bitfield in bits.
+ //
+ // Zero if the field is not a bitfield.
+ bitfieldSize Bits
+}
+
+func (cf *coreField) adjustOffsetToNthElement(n int) error {
+ if n == 0 {
+ return nil
+ }
+
+ size, err := Sizeof(cf.Type)
+ if err != nil {
+ return err
+ }
+
+ cf.offset += uint32(n) * uint32(size)
+ return nil
+}
+
+func (cf *coreField) adjustOffsetBits(offset Bits) error {
+ align, err := alignof(cf.Type)
+ if err != nil {
+ return err
+ }
+
+ // We can compute the load offset by:
+ // 1) converting the bit offset to bytes with a flooring division.
+ // 2) dividing and multiplying that offset by the alignment, yielding the
+ // load size aligned offset.
+ offsetBytes := uint32(offset/8) / uint32(align) * uint32(align)
+
+ // The number of bits remaining is the bit offset less the number of bits
+ // we can "skip" with the aligned offset.
+ cf.bitfieldOffset = offset - Bits(offsetBytes*8)
+
+ // We know that cf.offset is aligned to at least align since we get it
+ // from the compiler via BTF. Adding an aligned offsetBytes preserves the
+ // alignment.
+ cf.offset += offsetBytes
+ return nil
+}
+
+func (cf *coreField) sizeBits() (Bits, error) {
+ if cf.bitfieldSize > 0 {
+ return cf.bitfieldSize, nil
+ }
+
+ // Someone is trying to access a non-bitfield via a bit shift relocation.
+ // This happens when a field changes from a bitfield to a regular field
+ // between kernel versions. Synthesise the size to make the shifts work.
+ size, err := Sizeof(cf.Type)
+ if err != nil {
+ return 0, err
+ }
+ return Bits(size * 8), nil
+}
+
+// coreFindField descends into the local type using the accessor and tries to
+// find an equivalent field in target at each step.
+//
+// Returns the field and the offset of the field from the start of
+// target in bits.
+func coreFindField(localT Type, localAcc coreAccessor, targetT Type) (coreField, coreField, error) {
+ local := coreField{Type: localT}
+ target := coreField{Type: targetT}
+
+ if err := coreAreMembersCompatible(local.Type, target.Type); err != nil {
+ return coreField{}, coreField{}, fmt.Errorf("fields: %w", err)
+ }
+
+ // The first index is used to offset a pointer of the base type like
+ // when accessing an array.
+ if err := local.adjustOffsetToNthElement(localAcc[0]); err != nil {
+ return coreField{}, coreField{}, err
+ }
+
+ if err := target.adjustOffsetToNthElement(localAcc[0]); err != nil {
+ return coreField{}, coreField{}, err
+ }
+
+ var localMaybeFlex, targetMaybeFlex bool
+ for i, acc := range localAcc[1:] {
+ switch localType := UnderlyingType(local.Type).(type) {
+ case composite:
+ // For composite types acc is used to find the field in the local type,
+ // and then we try to find a field in target with the same name.
+ localMembers := localType.members()
+ if acc >= len(localMembers) {
+ return coreField{}, coreField{}, fmt.Errorf("invalid accessor %d for %s", acc, localType)
+ }
+
+ localMember := localMembers[acc]
+ if localMember.Name == "" {
+ localMemberType, ok := as[composite](localMember.Type)
+ if !ok {
+ return coreField{}, coreField{}, fmt.Errorf("unnamed field with type %s: %s", localMember.Type, ErrNotSupported)
+ }
+
+ // This is an anonymous struct or union, ignore it.
+ local = coreField{
+ Type: localMemberType,
+ offset: local.offset + localMember.Offset.Bytes(),
+ }
+ localMaybeFlex = false
+ continue
+ }
+
+ targetType, ok := as[composite](target.Type)
+ if !ok {
+ return coreField{}, coreField{}, fmt.Errorf("target not composite: %w", errImpossibleRelocation)
+ }
+
+ targetMember, last, err := coreFindMember(targetType, localMember.Name)
+ if err != nil {
+ return coreField{}, coreField{}, err
+ }
+
+ local = coreField{
+ Type: localMember.Type,
+ offset: local.offset,
+ bitfieldSize: localMember.BitfieldSize,
+ }
+ localMaybeFlex = acc == len(localMembers)-1
+
+ target = coreField{
+ Type: targetMember.Type,
+ offset: target.offset,
+ bitfieldSize: targetMember.BitfieldSize,
+ }
+ targetMaybeFlex = last
+
+ if local.bitfieldSize == 0 && target.bitfieldSize == 0 {
+ local.offset += localMember.Offset.Bytes()
+ target.offset += targetMember.Offset.Bytes()
+ break
+ }
+
+ // Either of the members is a bitfield. Make sure we're at the
+ // end of the accessor.
+ if next := i + 1; next < len(localAcc[1:]) {
+ return coreField{}, coreField{}, fmt.Errorf("can't descend into bitfield")
+ }
+
+ if err := local.adjustOffsetBits(localMember.Offset); err != nil {
+ return coreField{}, coreField{}, err
+ }
+
+ if err := target.adjustOffsetBits(targetMember.Offset); err != nil {
+ return coreField{}, coreField{}, err
+ }
+
+ case *Array:
+ // For arrays, acc is the index in the target.
+ targetType, ok := as[*Array](target.Type)
+ if !ok {
+ return coreField{}, coreField{}, fmt.Errorf("target not array: %w", errImpossibleRelocation)
+ }
+
+ if localType.Nelems == 0 && !localMaybeFlex {
+ return coreField{}, coreField{}, fmt.Errorf("local type has invalid flexible array")
+ }
+ if targetType.Nelems == 0 && !targetMaybeFlex {
+ return coreField{}, coreField{}, fmt.Errorf("target type has invalid flexible array")
+ }
+
+ if localType.Nelems > 0 && acc >= int(localType.Nelems) {
+ return coreField{}, coreField{}, fmt.Errorf("invalid access of %s at index %d", localType, acc)
+ }
+ if targetType.Nelems > 0 && acc >= int(targetType.Nelems) {
+ return coreField{}, coreField{}, fmt.Errorf("out of bounds access of target: %w", errImpossibleRelocation)
+ }
+
+ local = coreField{
+ Type: localType.Type,
+ offset: local.offset,
+ }
+ localMaybeFlex = false
+
+ if err := local.adjustOffsetToNthElement(acc); err != nil {
+ return coreField{}, coreField{}, err
+ }
+
+ target = coreField{
+ Type: targetType.Type,
+ offset: target.offset,
+ }
+ targetMaybeFlex = false
+
+ if err := target.adjustOffsetToNthElement(acc); err != nil {
+ return coreField{}, coreField{}, err
+ }
+
+ default:
+ return coreField{}, coreField{}, fmt.Errorf("relocate field of %T: %w", localType, ErrNotSupported)
+ }
+
+ if err := coreAreMembersCompatible(local.Type, target.Type); err != nil {
+ return coreField{}, coreField{}, err
+ }
+ }
+
+ return local, target, nil
+}
+
+// coreFindMember finds a member in a composite type while handling anonymous
+// structs and unions.
+func coreFindMember(typ composite, name string) (Member, bool, error) {
+ if name == "" {
+ return Member{}, false, errors.New("can't search for anonymous member")
+ }
+
+ type offsetTarget struct {
+ composite
+ offset Bits
+ }
+
+ targets := []offsetTarget{{typ, 0}}
+ visited := make(map[composite]bool)
+
+ for i := 0; i < len(targets); i++ {
+ target := targets[i]
+
+ // Only visit targets once to prevent infinite recursion.
+ if visited[target] {
+ continue
+ }
+ if len(visited) >= maxTypeDepth {
+ // This check is different than libbpf, which restricts the entire
+ // path to BPF_CORE_SPEC_MAX_LEN items.
+ return Member{}, false, fmt.Errorf("type is nested too deep")
+ }
+ visited[target] = true
+
+ members := target.members()
+ for j, member := range members {
+ if member.Name == name {
+ // NB: This is safe because member is a copy.
+ member.Offset += target.offset
+ return member, j == len(members)-1, nil
+ }
+
+ // The names don't match, but this member could be an anonymous struct
+ // or union.
+ if member.Name != "" {
+ continue
+ }
+
+ comp, ok := as[composite](member.Type)
+ if !ok {
+ return Member{}, false, fmt.Errorf("anonymous non-composite type %T not allowed", member.Type)
+ }
+
+ targets = append(targets, offsetTarget{comp, target.offset + member.Offset})
+ }
+ }
+
+ return Member{}, false, fmt.Errorf("no matching member: %w", errImpossibleRelocation)
+}
+
+// coreFindEnumValue follows localAcc to find the equivalent enum value in target.
+func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localValue, targetValue *EnumValue, _ error) {
+ localValue, err := localAcc.enumValue(local)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ targetEnum, ok := as[*Enum](target)
+ if !ok {
+ return nil, nil, errImpossibleRelocation
+ }
+
+ localName := newEssentialName(localValue.Name)
+ for i, targetValue := range targetEnum.Values {
+ if newEssentialName(targetValue.Name) != localName {
+ continue
+ }
+
+ return localValue, &targetEnum.Values[i], nil
+ }
+
+ return nil, nil, errImpossibleRelocation
+}
+
+// CheckTypeCompatibility checks local and target types for Compatibility according to CO-RE rules.
+//
+// Only layout compatibility is checked, ignoring names of the root type.
+func CheckTypeCompatibility(localType Type, targetType Type) error {
+ return coreAreTypesCompatible(localType, targetType)
+}
+
+/* The comment below is from bpf_core_types_are_compat in libbpf.c:
+ *
+ * Check local and target types for compatibility. This check is used for
+ * type-based CO-RE relocations and follow slightly different rules than
+ * field-based relocations. This function assumes that root types were already
+ * checked for name match. Beyond that initial root-level name check, names
+ * are completely ignored. Compatibility rules are as follows:
+ * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
+ * kind should match for local and target types (i.e., STRUCT is not
+ * compatible with UNION);
+ * - for ENUMs, the size is ignored;
+ * - for INT, size and signedness are ignored;
+ * - for ARRAY, dimensionality is ignored, element types are checked for
+ * compatibility recursively;
+ * - CONST/VOLATILE/RESTRICT modifiers are ignored;
+ * - TYPEDEFs/PTRs are compatible if types they pointing to are compatible;
+ * - FUNC_PROTOs are compatible if they have compatible signature: same
+ * number of input args and compatible return and argument types.
+ * These rules are not set in stone and probably will be adjusted as we get
+ * more experience with using BPF CO-RE relocations.
+ *
+ * Returns errIncompatibleTypes if types are not compatible.
+ */
+func coreAreTypesCompatible(localType Type, targetType Type) error {
+
+ var (
+ localTs, targetTs typeDeque
+ l, t = &localType, &targetType
+ depth = 0
+ )
+
+ for ; l != nil && t != nil; l, t = localTs.Shift(), targetTs.Shift() {
+ if depth >= maxTypeDepth {
+ return errors.New("types are nested too deep")
+ }
+
+ localType = UnderlyingType(*l)
+ targetType = UnderlyingType(*t)
+
+ if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
+ return fmt.Errorf("type mismatch: %w", errIncompatibleTypes)
+ }
+
+ switch lv := (localType).(type) {
+ case *Void, *Struct, *Union, *Enum, *Fwd, *Int:
+ // Nothing to do here
+
+ case *Pointer, *Array:
+ depth++
+ walkType(localType, localTs.Push)
+ walkType(targetType, targetTs.Push)
+
+ case *FuncProto:
+ tv := targetType.(*FuncProto)
+ if len(lv.Params) != len(tv.Params) {
+ return fmt.Errorf("function param mismatch: %w", errIncompatibleTypes)
+ }
+
+ depth++
+ walkType(localType, localTs.Push)
+ walkType(targetType, targetTs.Push)
+
+ default:
+ return fmt.Errorf("unsupported type %T", localType)
+ }
+ }
+
+ if l != nil {
+ return fmt.Errorf("dangling local type %T", *l)
+ }
+
+ if t != nil {
+ return fmt.Errorf("dangling target type %T", *t)
+ }
+
+ return nil
+}
+
+/* coreAreMembersCompatible checks two types for field-based relocation compatibility.
+ *
+ * The comment below is from bpf_core_fields_are_compat in libbpf.c:
+ *
+ * Check two types for compatibility for the purpose of field access
+ * relocation. const/volatile/restrict and typedefs are skipped to ensure we
+ * are relocating semantically compatible entities:
+ * - any two STRUCTs/UNIONs are compatible and can be mixed;
+ * - any two FWDs are compatible, if their names match (modulo flavor suffix);
+ * - any two PTRs are always compatible;
+ * - for ENUMs, names should be the same (ignoring flavor suffix) or at
+ * least one of enums should be anonymous;
+ * - for ENUMs, check sizes, names are ignored;
+ * - for INT, size and signedness are ignored;
+ * - any two FLOATs are always compatible;
+ * - for ARRAY, dimensionality is ignored, element types are checked for
+ * compatibility recursively;
+ * [ NB: coreAreMembersCompatible doesn't recurse, this check is done
+ * by coreFindField. ]
+ * - everything else shouldn't be ever a target of relocation.
+ * These rules are not set in stone and probably will be adjusted as we get
+ * more experience with using BPF CO-RE relocations.
+ *
+ * Returns errImpossibleRelocation if the members are not compatible.
+ */
+func coreAreMembersCompatible(localType Type, targetType Type) error {
+ localType = UnderlyingType(localType)
+ targetType = UnderlyingType(targetType)
+
+ doNamesMatch := func(a, b string) error {
+ if a == "" || b == "" {
+ // allow anonymous and named type to match
+ return nil
+ }
+
+ if newEssentialName(a) == newEssentialName(b) {
+ return nil
+ }
+
+ return fmt.Errorf("names don't match: %w", errImpossibleRelocation)
+ }
+
+ _, lok := localType.(composite)
+ _, tok := targetType.(composite)
+ if lok && tok {
+ return nil
+ }
+
+ if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
+ return fmt.Errorf("type mismatch: %w", errImpossibleRelocation)
+ }
+
+ switch lv := localType.(type) {
+ case *Array, *Pointer, *Float, *Int:
+ return nil
+
+ case *Enum:
+ tv := targetType.(*Enum)
+ return doNamesMatch(lv.Name, tv.Name)
+
+ case *Fwd:
+ tv := targetType.(*Fwd)
+ return doNamesMatch(lv.Name, tv.Name)
+
+ default:
+ return fmt.Errorf("type %s: %w", localType, ErrNotSupported)
+ }
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/doc.go b/vendor/github.com/cilium/ebpf/btf/doc.go
new file mode 100644
index 000000000..b1f4b1fc3
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/doc.go
@@ -0,0 +1,5 @@
+// Package btf handles data encoded according to the BPF Type Format.
+//
+// The canonical documentation lives in the Linux kernel repository and is
+// available at https://www.kernel.org/doc/html/latest/bpf/btf.html
+package btf
diff --git a/vendor/github.com/cilium/ebpf/btf/ext_info.go b/vendor/github.com/cilium/ebpf/btf/ext_info.go
new file mode 100644
index 000000000..36803504b
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/ext_info.go
@@ -0,0 +1,829 @@
+package btf
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "sort"
+
+ "github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/internal"
+)
+
+// ExtInfos contains ELF section metadata.
+type ExtInfos struct {
+ // The slices are sorted by offset in ascending order.
+ funcInfos map[string]FuncInfos
+ lineInfos map[string]LineInfos
+ relocationInfos map[string]CORERelocationInfos
+}
+
+// loadExtInfosFromELF parses ext infos from the .BTF.ext section in an ELF.
+//
+// Returns an error wrapping ErrNotFound if no ext infos are present.
+func loadExtInfosFromELF(file *internal.SafeELFFile, spec *Spec) (*ExtInfos, error) {
+ section := file.Section(".BTF.ext")
+ if section == nil {
+ return nil, fmt.Errorf("btf ext infos: %w", ErrNotFound)
+ }
+
+ if section.ReaderAt == nil {
+ return nil, fmt.Errorf("compressed ext_info is not supported")
+ }
+
+ return loadExtInfos(section.ReaderAt, file.ByteOrder, spec)
+}
+
+// loadExtInfos parses bare ext infos.
+func loadExtInfos(r io.ReaderAt, bo binary.ByteOrder, spec *Spec) (*ExtInfos, error) {
+ // Open unbuffered section reader. binary.Read() calls io.ReadFull on
+ // the header structs, resulting in one syscall per header.
+ headerRd := io.NewSectionReader(r, 0, math.MaxInt64)
+ extHeader, err := parseBTFExtHeader(headerRd, bo)
+ if err != nil {
+ return nil, fmt.Errorf("parsing BTF extension header: %w", err)
+ }
+
+ coreHeader, err := parseBTFExtCOREHeader(headerRd, bo, extHeader)
+ if err != nil {
+ return nil, fmt.Errorf("parsing BTF CO-RE header: %w", err)
+ }
+
+ buf := internal.NewBufferedSectionReader(r, extHeader.funcInfoStart(), int64(extHeader.FuncInfoLen))
+ btfFuncInfos, err := parseFuncInfos(buf, bo, spec.strings)
+ if err != nil {
+ return nil, fmt.Errorf("parsing BTF function info: %w", err)
+ }
+
+ funcInfos := make(map[string]FuncInfos, len(btfFuncInfos))
+ for section, bfis := range btfFuncInfos {
+ funcInfos[section], err = newFuncInfos(bfis, spec)
+ if err != nil {
+ return nil, fmt.Errorf("section %s: func infos: %w", section, err)
+ }
+ }
+
+ buf = internal.NewBufferedSectionReader(r, extHeader.lineInfoStart(), int64(extHeader.LineInfoLen))
+ btfLineInfos, err := parseLineInfos(buf, bo, spec.strings)
+ if err != nil {
+ return nil, fmt.Errorf("parsing BTF line info: %w", err)
+ }
+
+ lineInfos := make(map[string]LineInfos, len(btfLineInfos))
+ for section, blis := range btfLineInfos {
+ lineInfos[section], err = newLineInfos(blis, spec.strings)
+ if err != nil {
+ return nil, fmt.Errorf("section %s: line infos: %w", section, err)
+ }
+ }
+
+ if coreHeader == nil || coreHeader.COREReloLen == 0 {
+ return &ExtInfos{funcInfos, lineInfos, nil}, nil
+ }
+
+ var btfCORERelos map[string][]bpfCORERelo
+ buf = internal.NewBufferedSectionReader(r, extHeader.coreReloStart(coreHeader), int64(coreHeader.COREReloLen))
+ btfCORERelos, err = parseCORERelos(buf, bo, spec.strings)
+ if err != nil {
+ return nil, fmt.Errorf("parsing CO-RE relocation info: %w", err)
+ }
+
+ coreRelos := make(map[string]CORERelocationInfos, len(btfCORERelos))
+ for section, brs := range btfCORERelos {
+ coreRelos[section], err = newRelocationInfos(brs, spec, spec.strings)
+ if err != nil {
+ return nil, fmt.Errorf("section %s: CO-RE relocations: %w", section, err)
+ }
+ }
+
+ return &ExtInfos{funcInfos, lineInfos, coreRelos}, nil
+}
+
+type funcInfoMeta struct{}
+type coreRelocationMeta struct{}
+
+// Assign per-section metadata from BTF to a section's instructions.
+func (ei *ExtInfos) Assign(insns asm.Instructions, section string) {
+ funcInfos := ei.funcInfos[section]
+ lineInfos := ei.lineInfos[section]
+ reloInfos := ei.relocationInfos[section]
+
+ AssignMetadataToInstructions(insns, funcInfos, lineInfos, reloInfos)
+}
+
+// Assign per-instruction metadata to the instructions in insns.
+func AssignMetadataToInstructions(
+ insns asm.Instructions,
+ funcInfos FuncInfos,
+ lineInfos LineInfos,
+ reloInfos CORERelocationInfos,
+) {
+ iter := insns.Iterate()
+ for iter.Next() {
+ if len(funcInfos.infos) > 0 && funcInfos.infos[0].offset == iter.Offset {
+ *iter.Ins = WithFuncMetadata(*iter.Ins, funcInfos.infos[0].fn)
+ funcInfos.infos = funcInfos.infos[1:]
+ }
+
+ if len(lineInfos.infos) > 0 && lineInfos.infos[0].offset == iter.Offset {
+ *iter.Ins = iter.Ins.WithSource(lineInfos.infos[0].line)
+ lineInfos.infos = lineInfos.infos[1:]
+ }
+
+ if len(reloInfos.infos) > 0 && reloInfos.infos[0].offset == iter.Offset {
+ iter.Ins.Metadata.Set(coreRelocationMeta{}, reloInfos.infos[0].relo)
+ reloInfos.infos = reloInfos.infos[1:]
+ }
+ }
+}
+
+// MarshalExtInfos encodes function and line info embedded in insns into kernel
+// wire format.
+//
+// Returns ErrNotSupported if the kernel doesn't support BTF-associated programs.
+func MarshalExtInfos(insns asm.Instructions) (_ *Handle, funcInfos, lineInfos []byte, _ error) {
+ // Bail out early if the kernel doesn't support Func(Proto). If this is the
+ // case, func_info will also be unsupported.
+ if err := haveProgBTF(); err != nil {
+ return nil, nil, nil, err
+ }
+
+ iter := insns.Iterate()
+ for iter.Next() {
+ _, ok := iter.Ins.Source().(*Line)
+ fn := FuncMetadata(iter.Ins)
+ if ok || fn != nil {
+ goto marshal
+ }
+ }
+
+ return nil, nil, nil, nil
+
+marshal:
+ var b Builder
+ var fiBuf, liBuf bytes.Buffer
+ for {
+ if fn := FuncMetadata(iter.Ins); fn != nil {
+ fi := &funcInfo{
+ fn: fn,
+ offset: iter.Offset,
+ }
+ if err := fi.marshal(&fiBuf, &b); err != nil {
+ return nil, nil, nil, fmt.Errorf("write func info: %w", err)
+ }
+ }
+
+ if line, ok := iter.Ins.Source().(*Line); ok {
+ li := &lineInfo{
+ line: line,
+ offset: iter.Offset,
+ }
+ if err := li.marshal(&liBuf, &b); err != nil {
+ return nil, nil, nil, fmt.Errorf("write line info: %w", err)
+ }
+ }
+
+ if !iter.Next() {
+ break
+ }
+ }
+
+ handle, err := NewHandle(&b)
+ return handle, fiBuf.Bytes(), liBuf.Bytes(), err
+}
+
+// btfExtHeader is found at the start of the .BTF.ext section.
+type btfExtHeader struct {
+ Magic uint16
+ Version uint8
+ Flags uint8
+
+ // HdrLen is larger than the size of struct btfExtHeader when it is
+ // immediately followed by a btfExtCOREHeader.
+ HdrLen uint32
+
+ FuncInfoOff uint32
+ FuncInfoLen uint32
+ LineInfoOff uint32
+ LineInfoLen uint32
+}
+
+// parseBTFExtHeader parses the header of the .BTF.ext section.
+func parseBTFExtHeader(r io.Reader, bo binary.ByteOrder) (*btfExtHeader, error) {
+ var header btfExtHeader
+ if err := binary.Read(r, bo, &header); err != nil {
+ return nil, fmt.Errorf("can't read header: %v", err)
+ }
+
+ if header.Magic != btfMagic {
+ return nil, fmt.Errorf("incorrect magic value %v", header.Magic)
+ }
+
+ if header.Version != 1 {
+ return nil, fmt.Errorf("unexpected version %v", header.Version)
+ }
+
+ if header.Flags != 0 {
+ return nil, fmt.Errorf("unsupported flags %v", header.Flags)
+ }
+
+ if int64(header.HdrLen) < int64(binary.Size(&header)) {
+ return nil, fmt.Errorf("header length shorter than btfExtHeader size")
+ }
+
+ return &header, nil
+}
+
+// funcInfoStart returns the offset from the beginning of the .BTF.ext section
+// to the start of its func_info entries.
+func (h *btfExtHeader) funcInfoStart() int64 {
+ return int64(h.HdrLen + h.FuncInfoOff)
+}
+
+// lineInfoStart returns the offset from the beginning of the .BTF.ext section
+// to the start of its line_info entries.
+func (h *btfExtHeader) lineInfoStart() int64 {
+ return int64(h.HdrLen + h.LineInfoOff)
+}
+
+// coreReloStart returns the offset from the beginning of the .BTF.ext section
+// to the start of its CO-RE relocation entries.
+func (h *btfExtHeader) coreReloStart(ch *btfExtCOREHeader) int64 {
+ return int64(h.HdrLen + ch.COREReloOff)
+}
+
+// btfExtCOREHeader is found right after the btfExtHeader when its HdrLen
+// field is larger than its size.
+type btfExtCOREHeader struct {
+ COREReloOff uint32
+ COREReloLen uint32
+}
+
+// parseBTFExtCOREHeader parses the tail of the .BTF.ext header. If additional
+// header bytes are present, extHeader.HdrLen will be larger than the struct,
+// indicating the presence of a CO-RE extension header.
+func parseBTFExtCOREHeader(r io.Reader, bo binary.ByteOrder, extHeader *btfExtHeader) (*btfExtCOREHeader, error) {
+ extHdrSize := int64(binary.Size(&extHeader))
+ remainder := int64(extHeader.HdrLen) - extHdrSize
+
+ if remainder == 0 {
+ return nil, nil
+ }
+
+ var coreHeader btfExtCOREHeader
+ if err := binary.Read(r, bo, &coreHeader); err != nil {
+ return nil, fmt.Errorf("can't read header: %v", err)
+ }
+
+ return &coreHeader, nil
+}
+
+type btfExtInfoSec struct {
+ SecNameOff uint32
+ NumInfo uint32
+}
+
+// parseExtInfoSec parses a btf_ext_info_sec header within .BTF.ext,
+// appearing within func_info and line_info sub-sections.
+// These headers appear once for each program section in the ELF and are
+// followed by one or more func/line_info records for the section.
+func parseExtInfoSec(r io.Reader, bo binary.ByteOrder, strings *stringTable) (string, *btfExtInfoSec, error) {
+ var infoHeader btfExtInfoSec
+ if err := binary.Read(r, bo, &infoHeader); err != nil {
+ return "", nil, fmt.Errorf("read ext info header: %w", err)
+ }
+
+ secName, err := strings.Lookup(infoHeader.SecNameOff)
+ if err != nil {
+ return "", nil, fmt.Errorf("get section name: %w", err)
+ }
+ if secName == "" {
+ return "", nil, fmt.Errorf("extinfo header refers to empty section name")
+ }
+
+ if infoHeader.NumInfo == 0 {
+ return "", nil, fmt.Errorf("section %s has zero records", secName)
+ }
+
+ return secName, &infoHeader, nil
+}
+
+// parseExtInfoRecordSize parses the uint32 at the beginning of a func_infos
+// or line_infos segment that describes the length of all extInfoRecords in
+// that segment.
+func parseExtInfoRecordSize(r io.Reader, bo binary.ByteOrder) (uint32, error) {
+ const maxRecordSize = 256
+
+ var recordSize uint32
+ if err := binary.Read(r, bo, &recordSize); err != nil {
+ return 0, fmt.Errorf("can't read record size: %v", err)
+ }
+
+ if recordSize < 4 {
+ // Need at least InsnOff worth of bytes per record.
+ return 0, errors.New("record size too short")
+ }
+ if recordSize > maxRecordSize {
+ return 0, fmt.Errorf("record size %v exceeds %v", recordSize, maxRecordSize)
+ }
+
+ return recordSize, nil
+}
+
+// FuncInfos contains a sorted list of func infos.
+type FuncInfos struct {
+ infos []funcInfo
+}
+
+// The size of a FuncInfo in BTF wire format.
+var FuncInfoSize = uint32(binary.Size(bpfFuncInfo{}))
+
+type funcInfo struct {
+ fn *Func
+ offset asm.RawInstructionOffset
+}
+
+type bpfFuncInfo struct {
+ // Instruction offset of the function within an ELF section.
+ InsnOff uint32
+ TypeID TypeID
+}
+
+func newFuncInfo(fi bpfFuncInfo, spec *Spec) (*funcInfo, error) {
+ typ, err := spec.TypeByID(fi.TypeID)
+ if err != nil {
+ return nil, err
+ }
+
+ fn, ok := typ.(*Func)
+ if !ok {
+ return nil, fmt.Errorf("type ID %d is a %T, but expected a Func", fi.TypeID, typ)
+ }
+
+ // C doesn't have anonymous functions, but check just in case.
+ if fn.Name == "" {
+ return nil, fmt.Errorf("func with type ID %d doesn't have a name", fi.TypeID)
+ }
+
+ return &funcInfo{
+ fn,
+ asm.RawInstructionOffset(fi.InsnOff),
+ }, nil
+}
+
+func newFuncInfos(bfis []bpfFuncInfo, spec *Spec) (FuncInfos, error) {
+ fis := FuncInfos{
+ infos: make([]funcInfo, 0, len(bfis)),
+ }
+ for _, bfi := range bfis {
+ fi, err := newFuncInfo(bfi, spec)
+ if err != nil {
+ return FuncInfos{}, fmt.Errorf("offset %d: %w", bfi.InsnOff, err)
+ }
+ fis.infos = append(fis.infos, *fi)
+ }
+ sort.Slice(fis.infos, func(i, j int) bool {
+ return fis.infos[i].offset <= fis.infos[j].offset
+ })
+ return fis, nil
+}
+
+// LoadFuncInfos parses btf func info in wire format.
+func LoadFuncInfos(reader io.Reader, bo binary.ByteOrder, recordNum uint32, spec *Spec) (FuncInfos, error) {
+ fis, err := parseFuncInfoRecords(
+ reader,
+ bo,
+ FuncInfoSize,
+ recordNum,
+ )
+ if err != nil {
+ return FuncInfos{}, fmt.Errorf("parsing BTF func info: %w", err)
+ }
+
+ return newFuncInfos(fis, spec)
+}
+
+// marshal into the BTF wire format.
+func (fi *funcInfo) marshal(w *bytes.Buffer, b *Builder) error {
+ id, err := b.Add(fi.fn)
+ if err != nil {
+ return err
+ }
+ bfi := bpfFuncInfo{
+ InsnOff: uint32(fi.offset),
+ TypeID: id,
+ }
+ buf := make([]byte, FuncInfoSize)
+ internal.NativeEndian.PutUint32(buf, bfi.InsnOff)
+ internal.NativeEndian.PutUint32(buf[4:], uint32(bfi.TypeID))
+ _, err = w.Write(buf)
+ return err
+}
+
+// parseFuncInfos parses a func_info sub-section within .BTF.ext into a map of
+// func infos indexed by section name.
+func parseFuncInfos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfFuncInfo, error) {
+ recordSize, err := parseExtInfoRecordSize(r, bo)
+ if err != nil {
+ return nil, err
+ }
+
+ result := make(map[string][]bpfFuncInfo)
+ for {
+ secName, infoHeader, err := parseExtInfoSec(r, bo, strings)
+ if errors.Is(err, io.EOF) {
+ return result, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ records, err := parseFuncInfoRecords(r, bo, recordSize, infoHeader.NumInfo)
+ if err != nil {
+ return nil, fmt.Errorf("section %v: %w", secName, err)
+ }
+
+ result[secName] = records
+ }
+}
+
+// parseFuncInfoRecords parses a stream of func_infos into a funcInfos.
+// These records appear after a btf_ext_info_sec header in the func_info
+// sub-section of .BTF.ext.
+func parseFuncInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32) ([]bpfFuncInfo, error) {
+ var out []bpfFuncInfo
+ var fi bpfFuncInfo
+
+ if exp, got := FuncInfoSize, recordSize; exp != got {
+ // BTF blob's record size is longer than we know how to parse.
+ return nil, fmt.Errorf("expected FuncInfo record size %d, but BTF blob contains %d", exp, got)
+ }
+
+ for i := uint32(0); i < recordNum; i++ {
+ if err := binary.Read(r, bo, &fi); err != nil {
+ return nil, fmt.Errorf("can't read function info: %v", err)
+ }
+
+ if fi.InsnOff%asm.InstructionSize != 0 {
+ return nil, fmt.Errorf("offset %v is not aligned with instruction size", fi.InsnOff)
+ }
+
+ // ELF tracks offset in bytes, the kernel expects raw BPF instructions.
+ // Convert as early as possible.
+ fi.InsnOff /= asm.InstructionSize
+
+ out = append(out, fi)
+ }
+
+ return out, nil
+}
+
+var LineInfoSize = uint32(binary.Size(bpfLineInfo{}))
+
+// Line represents the location and contents of a single line of source
+// code a BPF ELF was compiled from.
+type Line struct {
+ fileName string
+ line string
+ lineNumber uint32
+ lineColumn uint32
+}
+
+func (li *Line) FileName() string {
+ return li.fileName
+}
+
+func (li *Line) Line() string {
+ return li.line
+}
+
+func (li *Line) LineNumber() uint32 {
+ return li.lineNumber
+}
+
+func (li *Line) LineColumn() uint32 {
+ return li.lineColumn
+}
+
+func (li *Line) String() string {
+ return li.line
+}
+
+// LineInfos contains a sorted list of line infos.
+type LineInfos struct {
+ infos []lineInfo
+}
+
+type lineInfo struct {
+ line *Line
+ offset asm.RawInstructionOffset
+}
+
+// Constants for the format of bpfLineInfo.LineCol.
+const (
+ bpfLineShift = 10
+ bpfLineMax = (1 << (32 - bpfLineShift)) - 1
+ bpfColumnMax = (1 << bpfLineShift) - 1
+)
+
+type bpfLineInfo struct {
+ // Instruction offset of the line within the whole instruction stream, in instructions.
+ InsnOff uint32
+ FileNameOff uint32
+ LineOff uint32
+ LineCol uint32
+}
+
+// LoadLineInfos parses btf line info in wire format.
+func LoadLineInfos(reader io.Reader, bo binary.ByteOrder, recordNum uint32, spec *Spec) (LineInfos, error) {
+ lis, err := parseLineInfoRecords(
+ reader,
+ bo,
+ LineInfoSize,
+ recordNum,
+ )
+ if err != nil {
+ return LineInfos{}, fmt.Errorf("parsing BTF line info: %w", err)
+ }
+
+ return newLineInfos(lis, spec.strings)
+}
+
+func newLineInfo(li bpfLineInfo, strings *stringTable) (*lineInfo, error) {
+ line, err := strings.Lookup(li.LineOff)
+ if err != nil {
+ return nil, fmt.Errorf("lookup of line: %w", err)
+ }
+
+ fileName, err := strings.Lookup(li.FileNameOff)
+ if err != nil {
+ return nil, fmt.Errorf("lookup of filename: %w", err)
+ }
+
+ lineNumber := li.LineCol >> bpfLineShift
+ lineColumn := li.LineCol & bpfColumnMax
+
+ return &lineInfo{
+ &Line{
+ fileName,
+ line,
+ lineNumber,
+ lineColumn,
+ },
+ asm.RawInstructionOffset(li.InsnOff),
+ }, nil
+}
+
+func newLineInfos(blis []bpfLineInfo, strings *stringTable) (LineInfos, error) {
+ lis := LineInfos{
+ infos: make([]lineInfo, 0, len(blis)),
+ }
+ for _, bli := range blis {
+ li, err := newLineInfo(bli, strings)
+ if err != nil {
+ return LineInfos{}, fmt.Errorf("offset %d: %w", bli.InsnOff, err)
+ }
+ lis.infos = append(lis.infos, *li)
+ }
+ sort.Slice(lis.infos, func(i, j int) bool {
+ return lis.infos[i].offset <= lis.infos[j].offset
+ })
+ return lis, nil
+}
+
+// marshal writes the binary representation of the LineInfo to w.
+func (li *lineInfo) marshal(w *bytes.Buffer, b *Builder) error {
+ line := li.line
+ if line.lineNumber > bpfLineMax {
+ return fmt.Errorf("line %d exceeds %d", line.lineNumber, bpfLineMax)
+ }
+
+ if line.lineColumn > bpfColumnMax {
+ return fmt.Errorf("column %d exceeds %d", line.lineColumn, bpfColumnMax)
+ }
+
+ fileNameOff, err := b.addString(line.fileName)
+ if err != nil {
+ return fmt.Errorf("file name %q: %w", line.fileName, err)
+ }
+
+ lineOff, err := b.addString(line.line)
+ if err != nil {
+ return fmt.Errorf("line %q: %w", line.line, err)
+ }
+
+ bli := bpfLineInfo{
+ uint32(li.offset),
+ fileNameOff,
+ lineOff,
+ (line.lineNumber << bpfLineShift) | line.lineColumn,
+ }
+
+ buf := make([]byte, LineInfoSize)
+ internal.NativeEndian.PutUint32(buf, bli.InsnOff)
+ internal.NativeEndian.PutUint32(buf[4:], bli.FileNameOff)
+ internal.NativeEndian.PutUint32(buf[8:], bli.LineOff)
+ internal.NativeEndian.PutUint32(buf[12:], bli.LineCol)
+ _, err = w.Write(buf)
+ return err
+}
+
+// parseLineInfos parses a line_info sub-section within .BTF.ext into a map of
+// line infos indexed by section name.
+func parseLineInfos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfLineInfo, error) {
+ recordSize, err := parseExtInfoRecordSize(r, bo)
+ if err != nil {
+ return nil, err
+ }
+
+ result := make(map[string][]bpfLineInfo)
+ for {
+ secName, infoHeader, err := parseExtInfoSec(r, bo, strings)
+ if errors.Is(err, io.EOF) {
+ return result, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ records, err := parseLineInfoRecords(r, bo, recordSize, infoHeader.NumInfo)
+ if err != nil {
+ return nil, fmt.Errorf("section %v: %w", secName, err)
+ }
+
+ result[secName] = records
+ }
+}
+
+// parseLineInfoRecords parses a stream of line_infos into a lineInfos.
+// These records appear after a btf_ext_info_sec header in the line_info
+// sub-section of .BTF.ext.
+func parseLineInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32) ([]bpfLineInfo, error) {
+ var out []bpfLineInfo
+ var li bpfLineInfo
+
+ if exp, got := uint32(binary.Size(li)), recordSize; exp != got {
+ // BTF blob's record size is longer than we know how to parse.
+ return nil, fmt.Errorf("expected LineInfo record size %d, but BTF blob contains %d", exp, got)
+ }
+
+ for i := uint32(0); i < recordNum; i++ {
+ if err := binary.Read(r, bo, &li); err != nil {
+ return nil, fmt.Errorf("can't read line info: %v", err)
+ }
+
+ if li.InsnOff%asm.InstructionSize != 0 {
+ return nil, fmt.Errorf("offset %v is not aligned with instruction size", li.InsnOff)
+ }
+
+ // ELF tracks offset in bytes, the kernel expects raw BPF instructions.
+ // Convert as early as possible.
+ li.InsnOff /= asm.InstructionSize
+
+ out = append(out, li)
+ }
+
+ return out, nil
+}
+
+// bpfCORERelo matches the kernel's struct bpf_core_relo.
+type bpfCORERelo struct {
+ InsnOff uint32
+ TypeID TypeID
+ AccessStrOff uint32
+ Kind coreKind
+}
+
+type CORERelocation struct {
+ // The local type of the relocation, stripped of typedefs and qualifiers.
+ typ Type
+ accessor coreAccessor
+ kind coreKind
+ // The ID of the local type in the source BTF.
+ id TypeID
+}
+
+func (cr *CORERelocation) String() string {
+ return fmt.Sprintf("CORERelocation(%s, %s[%s], local_id=%d)", cr.kind, cr.typ, cr.accessor, cr.id)
+}
+
+func CORERelocationMetadata(ins *asm.Instruction) *CORERelocation {
+ relo, _ := ins.Metadata.Get(coreRelocationMeta{}).(*CORERelocation)
+ return relo
+}
+
+// CORERelocationInfos contains a sorted list of CO-RE relocation infos.
+type CORERelocationInfos struct {
+ infos []coreRelocationInfo
+}
+
+type coreRelocationInfo struct {
+ relo *CORERelocation
+ offset asm.RawInstructionOffset
+}
+
+func newRelocationInfo(relo bpfCORERelo, spec *Spec, strings *stringTable) (*coreRelocationInfo, error) {
+ typ, err := spec.TypeByID(relo.TypeID)
+ if err != nil {
+ return nil, err
+ }
+
+ accessorStr, err := strings.Lookup(relo.AccessStrOff)
+ if err != nil {
+ return nil, err
+ }
+
+ accessor, err := parseCOREAccessor(accessorStr)
+ if err != nil {
+ return nil, fmt.Errorf("accessor %q: %s", accessorStr, err)
+ }
+
+ return &coreRelocationInfo{
+ &CORERelocation{
+ typ,
+ accessor,
+ relo.Kind,
+ relo.TypeID,
+ },
+ asm.RawInstructionOffset(relo.InsnOff),
+ }, nil
+}
+
+func newRelocationInfos(brs []bpfCORERelo, spec *Spec, strings *stringTable) (CORERelocationInfos, error) {
+ rs := CORERelocationInfos{
+ infos: make([]coreRelocationInfo, 0, len(brs)),
+ }
+ for _, br := range brs {
+ relo, err := newRelocationInfo(br, spec, strings)
+ if err != nil {
+ return CORERelocationInfos{}, fmt.Errorf("offset %d: %w", br.InsnOff, err)
+ }
+ rs.infos = append(rs.infos, *relo)
+ }
+ sort.Slice(rs.infos, func(i, j int) bool {
+ return rs.infos[i].offset < rs.infos[j].offset
+ })
+ return rs, nil
+}
+
+var extInfoReloSize = binary.Size(bpfCORERelo{})
+
+// parseCORERelos parses a core_relos sub-section within .BTF.ext into a map of
+// CO-RE relocations indexed by section name.
+func parseCORERelos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfCORERelo, error) {
+ recordSize, err := parseExtInfoRecordSize(r, bo)
+ if err != nil {
+ return nil, err
+ }
+
+ if recordSize != uint32(extInfoReloSize) {
+ return nil, fmt.Errorf("expected record size %d, got %d", extInfoReloSize, recordSize)
+ }
+
+ result := make(map[string][]bpfCORERelo)
+ for {
+ secName, infoHeader, err := parseExtInfoSec(r, bo, strings)
+ if errors.Is(err, io.EOF) {
+ return result, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ records, err := parseCOREReloRecords(r, bo, recordSize, infoHeader.NumInfo)
+ if err != nil {
+ return nil, fmt.Errorf("section %v: %w", secName, err)
+ }
+
+ result[secName] = records
+ }
+}
+
+// parseCOREReloRecords parses a stream of CO-RE relocation entries into a
+// coreRelos. These records appear after a btf_ext_info_sec header in the
+// core_relos sub-section of .BTF.ext.
+func parseCOREReloRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32) ([]bpfCORERelo, error) {
+ var out []bpfCORERelo
+
+ var relo bpfCORERelo
+ for i := uint32(0); i < recordNum; i++ {
+ if err := binary.Read(r, bo, &relo); err != nil {
+ return nil, fmt.Errorf("can't read CO-RE relocation: %v", err)
+ }
+
+ if relo.InsnOff%asm.InstructionSize != 0 {
+ return nil, fmt.Errorf("offset %v is not aligned with instruction size", relo.InsnOff)
+ }
+
+ // ELF tracks offset in bytes, the kernel expects raw BPF instructions.
+ // Convert as early as possible.
+ relo.InsnOff /= asm.InstructionSize
+
+ out = append(out, relo)
+ }
+
+ return out, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/feature.go b/vendor/github.com/cilium/ebpf/btf/feature.go
new file mode 100644
index 000000000..6feb08dfb
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/feature.go
@@ -0,0 +1,123 @@
+package btf
+
+import (
+ "errors"
+ "math"
+
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// haveBTF attempts to load a BTF blob containing an Int. It should pass on any
+// kernel that supports BPF_BTF_LOAD.
+var haveBTF = internal.NewFeatureTest("BTF", "4.18", func() error {
+ // 0-length anonymous integer
+ err := probeBTF(&Int{})
+ if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) {
+ return internal.ErrNotSupported
+ }
+ return err
+})
+
+// haveMapBTF attempts to load a minimal BTF blob containing a Var. It is
+// used as a proxy for .bss, .data and .rodata map support, which generally
+// come with a Var and Datasec. These were introduced in Linux 5.2.
+var haveMapBTF = internal.NewFeatureTest("Map BTF (Var/Datasec)", "5.2", func() error {
+ if err := haveBTF(); err != nil {
+ return err
+ }
+
+ v := &Var{
+ Name: "a",
+ Type: &Pointer{(*Void)(nil)},
+ }
+
+ err := probeBTF(v)
+ if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) {
+ // Treat both EINVAL and EPERM as not supported: creating the map may still
+ // succeed without Btf* attrs.
+ return internal.ErrNotSupported
+ }
+ return err
+})
+
+// haveProgBTF attempts to load a BTF blob containing a Func and FuncProto. It
+// is used as a proxy for ext_info (func_info) support, which depends on
+// Func(Proto) by definition.
+var haveProgBTF = internal.NewFeatureTest("Program BTF (func/line_info)", "5.0", func() error {
+ if err := haveBTF(); err != nil {
+ return err
+ }
+
+ fn := &Func{
+ Name: "a",
+ Type: &FuncProto{Return: (*Void)(nil)},
+ }
+
+ err := probeBTF(fn)
+ if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) {
+ return internal.ErrNotSupported
+ }
+ return err
+})
+
+var haveFuncLinkage = internal.NewFeatureTest("BTF func linkage", "5.6", func() error {
+ if err := haveProgBTF(); err != nil {
+ return err
+ }
+
+ fn := &Func{
+ Name: "a",
+ Type: &FuncProto{Return: (*Void)(nil)},
+ Linkage: GlobalFunc,
+ }
+
+ err := probeBTF(fn)
+ if errors.Is(err, unix.EINVAL) {
+ return internal.ErrNotSupported
+ }
+ return err
+})
+
+var haveEnum64 = internal.NewFeatureTest("ENUM64", "6.0", func() error {
+ if err := haveBTF(); err != nil {
+ return err
+ }
+
+ enum := &Enum{
+ Size: 8,
+ Values: []EnumValue{
+ {"TEST", math.MaxUint32 + 1},
+ },
+ }
+
+ err := probeBTF(enum)
+ if errors.Is(err, unix.EINVAL) {
+ return internal.ErrNotSupported
+ }
+ return err
+})
+
+func probeBTF(typ Type) error {
+ b, err := NewBuilder([]Type{typ})
+ if err != nil {
+ return err
+ }
+
+ buf, err := b.Marshal(nil, nil)
+ if err != nil {
+ return err
+ }
+
+ fd, err := sys.BtfLoad(&sys.BtfLoadAttr{
+ Btf: sys.NewSlicePointer(buf),
+ BtfSize: uint32(len(buf)),
+ })
+
+ if err == nil {
+ fd.Close()
+ }
+
+ return err
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/format.go b/vendor/github.com/cilium/ebpf/btf/format.go
new file mode 100644
index 000000000..acb489cd0
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/format.go
@@ -0,0 +1,350 @@
+package btf
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+var errNestedTooDeep = errors.New("nested too deep")
+
+// GoFormatter converts a Type to Go syntax.
+//
+// A zero GoFormatter is valid to use.
+type GoFormatter struct {
+ w strings.Builder
+
+ // Types present in this map are referred to using the given name if they
+ // are encountered when outputting another type.
+ Names map[Type]string
+
+ // Identifier is called for each field of struct-like types. By default the
+ // field name is used as is.
+ Identifier func(string) string
+
+ // EnumIdentifier is called for each element of an enum. By default the
+ // name of the enum type is concatenated with Identifier(element).
+ EnumIdentifier func(name, element string) string
+}
+
+// TypeDeclaration generates a Go type declaration for a BTF type.
+func (gf *GoFormatter) TypeDeclaration(name string, typ Type) (string, error) {
+ gf.w.Reset()
+ if err := gf.writeTypeDecl(name, typ); err != nil {
+ return "", err
+ }
+ return gf.w.String(), nil
+}
+
+func (gf *GoFormatter) identifier(s string) string {
+ if gf.Identifier != nil {
+ return gf.Identifier(s)
+ }
+
+ return s
+}
+
+func (gf *GoFormatter) enumIdentifier(name, element string) string {
+ if gf.EnumIdentifier != nil {
+ return gf.EnumIdentifier(name, element)
+ }
+
+ return name + gf.identifier(element)
+}
+
+// writeTypeDecl outputs a declaration of the given type.
+//
+// It encodes https://golang.org/ref/spec#Type_declarations:
+//
+// type foo struct { bar uint32; }
+// type bar int32
+func (gf *GoFormatter) writeTypeDecl(name string, typ Type) error {
+ if name == "" {
+ return fmt.Errorf("need a name for type %s", typ)
+ }
+
+ typ = skipQualifiers(typ)
+ fmt.Fprintf(&gf.w, "type %s ", name)
+ if err := gf.writeTypeLit(typ, 0); err != nil {
+ return err
+ }
+
+ e, ok := typ.(*Enum)
+ if !ok || len(e.Values) == 0 {
+ return nil
+ }
+
+ gf.w.WriteString("; const ( ")
+ for _, ev := range e.Values {
+ id := gf.enumIdentifier(name, ev.Name)
+ var value any
+ if e.Signed {
+ value = int64(ev.Value)
+ } else {
+ value = ev.Value
+ }
+ fmt.Fprintf(&gf.w, "%s %s = %d; ", id, name, value)
+ }
+ gf.w.WriteString(")")
+
+ return nil
+}
+
+// writeType outputs the name of a named type or a literal describing the type.
+//
+// It encodes https://golang.org/ref/spec#Types.
+//
+// foo (if foo is a named type)
+// uint32
+func (gf *GoFormatter) writeType(typ Type, depth int) error {
+ typ = skipQualifiers(typ)
+
+ name := gf.Names[typ]
+ if name != "" {
+ gf.w.WriteString(name)
+ return nil
+ }
+
+ return gf.writeTypeLit(typ, depth)
+}
+
+// writeTypeLit outputs a literal describing the type.
+//
+// The function ignores named types.
+//
+// It encodes https://golang.org/ref/spec#TypeLit.
+//
+// struct { bar uint32; }
+// uint32
+func (gf *GoFormatter) writeTypeLit(typ Type, depth int) error {
+ depth++
+ if depth > maxTypeDepth {
+ return errNestedTooDeep
+ }
+
+ var err error
+ switch v := skipQualifiers(typ).(type) {
+ case *Int:
+ err = gf.writeIntLit(v)
+
+ case *Enum:
+ if !v.Signed {
+ gf.w.WriteRune('u')
+ }
+ switch v.Size {
+ case 1:
+ gf.w.WriteString("int8")
+ case 2:
+ gf.w.WriteString("int16")
+ case 4:
+ gf.w.WriteString("int32")
+ case 8:
+ gf.w.WriteString("int64")
+ default:
+ err = fmt.Errorf("invalid enum size %d", v.Size)
+ }
+
+ case *Typedef:
+ err = gf.writeType(v.Type, depth)
+
+ case *Array:
+ fmt.Fprintf(&gf.w, "[%d]", v.Nelems)
+ err = gf.writeType(v.Type, depth)
+
+ case *Struct:
+ err = gf.writeStructLit(v.Size, v.Members, depth)
+
+ case *Union:
+ // Always choose the first member to represent the union in Go.
+ err = gf.writeStructLit(v.Size, v.Members[:1], depth)
+
+ case *Datasec:
+ err = gf.writeDatasecLit(v, depth)
+
+ default:
+ return fmt.Errorf("type %T: %w", v, ErrNotSupported)
+ }
+
+ if err != nil {
+ return fmt.Errorf("%s: %w", typ, err)
+ }
+
+ return nil
+}
+
+func (gf *GoFormatter) writeIntLit(i *Int) error {
+ bits := i.Size * 8
+ switch i.Encoding {
+ case Bool:
+ if i.Size != 1 {
+ return fmt.Errorf("bool with size %d", i.Size)
+ }
+ gf.w.WriteString("bool")
+ case Char:
+ if i.Size != 1 {
+ return fmt.Errorf("char with size %d", i.Size)
+ }
+ // BTF doesn't have a way to specify the signedness of a char. Assume
+ // we are dealing with unsigned, since this works nicely with []byte
+ // in Go code.
+ fallthrough
+ case Unsigned, Signed:
+ stem := "uint"
+ if i.Encoding == Signed {
+ stem = "int"
+ }
+ if i.Size > 8 {
+ fmt.Fprintf(&gf.w, "[%d]byte /* %s%d */", i.Size, stem, i.Size*8)
+ } else {
+ fmt.Fprintf(&gf.w, "%s%d", stem, bits)
+ }
+ default:
+ return fmt.Errorf("can't encode %s", i.Encoding)
+ }
+ return nil
+}
+
+func (gf *GoFormatter) writeStructLit(size uint32, members []Member, depth int) error {
+ gf.w.WriteString("struct { ")
+
+ prevOffset := uint32(0)
+ skippedBitfield := false
+ for i, m := range members {
+ if m.BitfieldSize > 0 {
+ skippedBitfield = true
+ continue
+ }
+
+ offset := m.Offset.Bytes()
+ if n := offset - prevOffset; skippedBitfield && n > 0 {
+ fmt.Fprintf(&gf.w, "_ [%d]byte /* unsupported bitfield */; ", n)
+ } else {
+ gf.writePadding(n)
+ }
+
+ fieldSize, err := Sizeof(m.Type)
+ if err != nil {
+ return fmt.Errorf("field %d: %w", i, err)
+ }
+
+ prevOffset = offset + uint32(fieldSize)
+ if prevOffset > size {
+ return fmt.Errorf("field %d of size %d exceeds type size %d", i, fieldSize, size)
+ }
+
+ if err := gf.writeStructField(m, depth); err != nil {
+ return fmt.Errorf("field %d: %w", i, err)
+ }
+ }
+
+ gf.writePadding(size - prevOffset)
+ gf.w.WriteString("}")
+ return nil
+}
+
+func (gf *GoFormatter) writeStructField(m Member, depth int) error {
+ if m.BitfieldSize > 0 {
+ return fmt.Errorf("bitfields are not supported")
+ }
+ if m.Offset%8 != 0 {
+ return fmt.Errorf("unsupported offset %d", m.Offset)
+ }
+
+ if m.Name == "" {
+ // Special case a nested anonymous union like
+ // struct foo { union { int bar; int baz }; }
+ // by replacing the whole union with its first member.
+ union, ok := m.Type.(*Union)
+ if !ok {
+ return fmt.Errorf("anonymous fields are not supported")
+
+ }
+
+ if len(union.Members) == 0 {
+ return errors.New("empty anonymous union")
+ }
+
+ depth++
+ if depth > maxTypeDepth {
+ return errNestedTooDeep
+ }
+
+ m := union.Members[0]
+ size, err := Sizeof(m.Type)
+ if err != nil {
+ return err
+ }
+
+ if err := gf.writeStructField(m, depth); err != nil {
+ return err
+ }
+
+ gf.writePadding(union.Size - uint32(size))
+ return nil
+
+ }
+
+ fmt.Fprintf(&gf.w, "%s ", gf.identifier(m.Name))
+
+ if err := gf.writeType(m.Type, depth); err != nil {
+ return err
+ }
+
+ gf.w.WriteString("; ")
+ return nil
+}
+
+func (gf *GoFormatter) writeDatasecLit(ds *Datasec, depth int) error {
+ gf.w.WriteString("struct { ")
+
+ prevOffset := uint32(0)
+ for i, vsi := range ds.Vars {
+ v, ok := vsi.Type.(*Var)
+ if !ok {
+ return fmt.Errorf("can't format %s as part of data section", vsi.Type)
+ }
+
+ if v.Linkage != GlobalVar {
+ // Ignore static, extern, etc. for now.
+ continue
+ }
+
+ if v.Name == "" {
+ return fmt.Errorf("variable %d: empty name", i)
+ }
+
+ gf.writePadding(vsi.Offset - prevOffset)
+ prevOffset = vsi.Offset + vsi.Size
+
+ fmt.Fprintf(&gf.w, "%s ", gf.identifier(v.Name))
+
+ if err := gf.writeType(v.Type, depth); err != nil {
+ return fmt.Errorf("variable %d: %w", i, err)
+ }
+
+ gf.w.WriteString("; ")
+ }
+
+ gf.writePadding(ds.Size - prevOffset)
+ gf.w.WriteString("}")
+ return nil
+}
+
+func (gf *GoFormatter) writePadding(bytes uint32) {
+ if bytes > 0 {
+ fmt.Fprintf(&gf.w, "_ [%d]byte; ", bytes)
+ }
+}
+
+func skipQualifiers(typ Type) Type {
+ result := typ
+ for depth := 0; depth <= maxTypeDepth; depth++ {
+ switch v := (result).(type) {
+ case qualifier:
+ result = v.qualify()
+ default:
+ return result
+ }
+ }
+ return &cycle{typ}
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/handle.go b/vendor/github.com/cilium/ebpf/btf/handle.go
new file mode 100644
index 000000000..b6b3e87f5
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/handle.go
@@ -0,0 +1,287 @@
+package btf
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "math"
+ "os"
+
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// Handle is a reference to BTF loaded into the kernel.
+type Handle struct {
+ fd *sys.FD
+
+ // Size of the raw BTF in bytes.
+ size uint32
+
+ needsKernelBase bool
+}
+
+// NewHandle loads the contents of a [Builder] into the kernel.
+//
+// Returns an error wrapping ErrNotSupported if the kernel doesn't support BTF.
+func NewHandle(b *Builder) (*Handle, error) {
+ small := getByteSlice()
+ defer putByteSlice(small)
+
+ buf, err := b.Marshal(*small, KernelMarshalOptions())
+ if err != nil {
+ return nil, fmt.Errorf("marshal BTF: %w", err)
+ }
+
+ return NewHandleFromRawBTF(buf)
+}
+
+// NewHandleFromRawBTF loads raw BTF into the kernel.
+//
+// Returns an error wrapping ErrNotSupported if the kernel doesn't support BTF.
+func NewHandleFromRawBTF(btf []byte) (*Handle, error) {
+ if uint64(len(btf)) > math.MaxUint32 {
+ return nil, errors.New("BTF exceeds the maximum size")
+ }
+
+ attr := &sys.BtfLoadAttr{
+ Btf: sys.NewSlicePointer(btf),
+ BtfSize: uint32(len(btf)),
+ }
+
+ fd, err := sys.BtfLoad(attr)
+ if err == nil {
+ return &Handle{fd, attr.BtfSize, false}, nil
+ }
+
+ if err := haveBTF(); err != nil {
+ return nil, err
+ }
+
+ logBuf := make([]byte, 64*1024)
+ attr.BtfLogBuf = sys.NewSlicePointer(logBuf)
+ attr.BtfLogSize = uint32(len(logBuf))
+ attr.BtfLogLevel = 1
+
+ // Up until at least kernel 6.0, the BTF verifier does not return ENOSPC
+ // if there are other verification errors. ENOSPC is only returned when
+ // the BTF blob is correct, a log was requested, and the provided buffer
+ // is too small.
+ _, ve := sys.BtfLoad(attr)
+ return nil, internal.ErrorWithLog("load btf", err, logBuf, errors.Is(ve, unix.ENOSPC))
+}
+
+// NewHandleFromID returns the BTF handle for a given id.
+//
+// Prefer calling [ebpf.Program.Handle] or [ebpf.Map.Handle] if possible.
+//
+// Returns ErrNotExist, if there is no BTF with the given id.
+//
+// Requires CAP_SYS_ADMIN.
+func NewHandleFromID(id ID) (*Handle, error) {
+ fd, err := sys.BtfGetFdById(&sys.BtfGetFdByIdAttr{
+ Id: uint32(id),
+ })
+ if err != nil {
+ return nil, fmt.Errorf("get FD for ID %d: %w", id, err)
+ }
+
+ info, err := newHandleInfoFromFD(fd)
+ if err != nil {
+ _ = fd.Close()
+ return nil, err
+ }
+
+ return &Handle{fd, info.size, info.IsModule()}, nil
+}
+
+// Spec parses the kernel BTF into Go types.
+//
+// base must contain type information for vmlinux if the handle is for
+// a kernel module. It may be nil otherwise.
+func (h *Handle) Spec(base *Spec) (*Spec, error) {
+ var btfInfo sys.BtfInfo
+ btfBuffer := make([]byte, h.size)
+ btfInfo.Btf, btfInfo.BtfSize = sys.NewSlicePointerLen(btfBuffer)
+
+ if err := sys.ObjInfo(h.fd, &btfInfo); err != nil {
+ return nil, err
+ }
+
+ if h.needsKernelBase && base == nil {
+ return nil, fmt.Errorf("missing base types")
+ }
+
+ return loadRawSpec(bytes.NewReader(btfBuffer), internal.NativeEndian, base)
+}
+
+// Close destroys the handle.
+//
+// Subsequent calls to FD will return an invalid value.
+func (h *Handle) Close() error {
+ if h == nil {
+ return nil
+ }
+
+ return h.fd.Close()
+}
+
+// FD returns the file descriptor for the handle.
+func (h *Handle) FD() int {
+ return h.fd.Int()
+}
+
+// Info returns metadata about the handle.
+func (h *Handle) Info() (*HandleInfo, error) {
+ return newHandleInfoFromFD(h.fd)
+}
+
+// HandleInfo describes a Handle.
+type HandleInfo struct {
+ // ID of this handle in the kernel. The ID is only valid as long as the
+ // associated handle is kept alive.
+ ID ID
+
+ // Name is an identifying name for the BTF, currently only used by the
+ // kernel.
+ Name string
+
+ // IsKernel is true if the BTF originated with the kernel and not
+ // userspace.
+ IsKernel bool
+
+ // Size of the raw BTF in bytes.
+ size uint32
+}
+
+func newHandleInfoFromFD(fd *sys.FD) (*HandleInfo, error) {
+ // We invoke the syscall once with an empty BTF and name buffers to get size
+ // information to allocate buffers. Then we invoke it a second time with
+ // buffers to receive the data.
+ var btfInfo sys.BtfInfo
+ if err := sys.ObjInfo(fd, &btfInfo); err != nil {
+ return nil, fmt.Errorf("get BTF info for fd %s: %w", fd, err)
+ }
+
+ if btfInfo.NameLen > 0 {
+ // NameLen doesn't account for the terminating NUL.
+ btfInfo.NameLen++
+ }
+
+ // Don't pull raw BTF by default, since it may be quite large.
+ btfSize := btfInfo.BtfSize
+ btfInfo.BtfSize = 0
+
+ nameBuffer := make([]byte, btfInfo.NameLen)
+ btfInfo.Name, btfInfo.NameLen = sys.NewSlicePointerLen(nameBuffer)
+ if err := sys.ObjInfo(fd, &btfInfo); err != nil {
+ return nil, err
+ }
+
+ return &HandleInfo{
+ ID: ID(btfInfo.Id),
+ Name: unix.ByteSliceToString(nameBuffer),
+ IsKernel: btfInfo.KernelBtf != 0,
+ size: btfSize,
+ }, nil
+}
+
+// IsVmlinux returns true if the BTF is for the kernel itself.
+func (i *HandleInfo) IsVmlinux() bool {
+ return i.IsKernel && i.Name == "vmlinux"
+}
+
+// IsModule returns true if the BTF is for a kernel module.
+func (i *HandleInfo) IsModule() bool {
+ return i.IsKernel && i.Name != "vmlinux"
+}
+
+// HandleIterator allows enumerating BTF blobs loaded into the kernel.
+type HandleIterator struct {
+ // The ID of the current handle. Only valid after a call to Next.
+ ID ID
+ // The current Handle. Only valid until a call to Next.
+ // See Take if you want to retain the handle.
+ Handle *Handle
+ err error
+}
+
+// Next retrieves a handle for the next BTF object.
+//
+// Returns true if another BTF object was found. Call [HandleIterator.Err] after
+// the function returns false.
+func (it *HandleIterator) Next() bool {
+ id := it.ID
+ for {
+ attr := &sys.BtfGetNextIdAttr{Id: id}
+ err := sys.BtfGetNextId(attr)
+ if errors.Is(err, os.ErrNotExist) {
+ // There are no more BTF objects.
+ break
+ } else if err != nil {
+ it.err = fmt.Errorf("get next BTF ID: %w", err)
+ break
+ }
+
+ id = attr.NextId
+ handle, err := NewHandleFromID(id)
+ if errors.Is(err, os.ErrNotExist) {
+ // Try again with the next ID.
+ continue
+ } else if err != nil {
+ it.err = fmt.Errorf("retrieve handle for ID %d: %w", id, err)
+ break
+ }
+
+ it.Handle.Close()
+ it.ID, it.Handle = id, handle
+ return true
+ }
+
+ // No more handles or we encountered an error.
+ it.Handle.Close()
+ it.Handle = nil
+ return false
+}
+
+// Take the ownership of the current handle.
+//
+// It's the caller's responsibility to close the handle.
+func (it *HandleIterator) Take() *Handle {
+ handle := it.Handle
+ it.Handle = nil
+ return handle
+}
+
+// Err returns an error if iteration failed for some reason.
+func (it *HandleIterator) Err() error {
+ return it.err
+}
+
+// FindHandle returns the first handle for which predicate returns true.
+//
+// Requires CAP_SYS_ADMIN.
+//
+// Returns an error wrapping ErrNotFound if predicate never returns true or if
+// there is no BTF loaded into the kernel.
+func FindHandle(predicate func(info *HandleInfo) bool) (*Handle, error) {
+ it := new(HandleIterator)
+ defer it.Handle.Close()
+
+ for it.Next() {
+ info, err := it.Handle.Info()
+ if err != nil {
+ return nil, fmt.Errorf("info for ID %d: %w", it.ID, err)
+ }
+
+ if predicate(info) {
+ return it.Take(), nil
+ }
+ }
+ if err := it.Err(); err != nil {
+ return nil, fmt.Errorf("iterate handles: %w", err)
+ }
+
+ return nil, fmt.Errorf("find handle: %w", ErrNotFound)
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/marshal.go b/vendor/github.com/cilium/ebpf/btf/marshal.go
new file mode 100644
index 000000000..0d093c665
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/marshal.go
@@ -0,0 +1,596 @@
+package btf
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "math"
+ "sync"
+
+ "github.com/cilium/ebpf/internal"
+
+ "golang.org/x/exp/slices"
+)
+
+type MarshalOptions struct {
+ // Target byte order. Defaults to the system's native endianness.
+ Order binary.ByteOrder
+ // Remove function linkage information for compatibility with <5.6 kernels.
+ StripFuncLinkage bool
+ // Replace Enum64 with a placeholder for compatibility with <6.0 kernels.
+ ReplaceEnum64 bool
+}
+
+// KernelMarshalOptions will generate BTF suitable for the current kernel.
+func KernelMarshalOptions() *MarshalOptions {
+ return &MarshalOptions{
+ Order: internal.NativeEndian,
+ StripFuncLinkage: haveFuncLinkage() != nil,
+ ReplaceEnum64: haveEnum64() != nil,
+ }
+}
+
+// encoder turns Types into raw BTF.
+type encoder struct {
+ MarshalOptions
+
+ pending internal.Deque[Type]
+ buf *bytes.Buffer
+ strings *stringTableBuilder
+ ids map[Type]TypeID
+ lastID TypeID
+}
+
+var bufferPool = sync.Pool{
+ New: func() any {
+ buf := make([]byte, btfHeaderLen+128)
+ return &buf
+ },
+}
+
+func getByteSlice() *[]byte {
+ return bufferPool.Get().(*[]byte)
+}
+
+func putByteSlice(buf *[]byte) {
+ *buf = (*buf)[:0]
+ bufferPool.Put(buf)
+}
+
+// Builder turns Types into raw BTF.
+//
+// The default value may be used and represents an empty BTF blob. Void is
+// added implicitly if necessary.
+type Builder struct {
+ // Explicitly added types.
+ types []Type
+ // IDs for all added types which the user knows about.
+ stableIDs map[Type]TypeID
+ // Explicitly added strings.
+ strings *stringTableBuilder
+}
+
+// NewBuilder creates a Builder from a list of types.
+//
+// It is more efficient than calling [Add] individually.
+//
+// Returns an error if adding any of the types fails.
+func NewBuilder(types []Type) (*Builder, error) {
+ b := &Builder{
+ make([]Type, 0, len(types)),
+ make(map[Type]TypeID, len(types)),
+ nil,
+ }
+
+ for _, typ := range types {
+ _, err := b.Add(typ)
+ if err != nil {
+ return nil, fmt.Errorf("add %s: %w", typ, err)
+ }
+ }
+
+ return b, nil
+}
+
+// Add a Type and allocate a stable ID for it.
+//
+// Adding the identical Type multiple times is valid and will return the same ID.
+//
+// See [Type] for details on identity.
+func (b *Builder) Add(typ Type) (TypeID, error) {
+ if b.stableIDs == nil {
+ b.stableIDs = make(map[Type]TypeID)
+ }
+
+ if _, ok := typ.(*Void); ok {
+ // Equality is weird for void, since it is a zero sized type.
+ return 0, nil
+ }
+
+ if ds, ok := typ.(*Datasec); ok {
+ if err := datasecResolveWorkaround(b, ds); err != nil {
+ return 0, err
+ }
+ }
+
+ id, ok := b.stableIDs[typ]
+ if ok {
+ return id, nil
+ }
+
+ b.types = append(b.types, typ)
+
+ id = TypeID(len(b.types))
+ if int(id) != len(b.types) {
+ return 0, fmt.Errorf("no more type IDs")
+ }
+
+ b.stableIDs[typ] = id
+ return id, nil
+}
+
+// Marshal encodes all types in the Marshaler into BTF wire format.
+//
+// opts may be nil.
+func (b *Builder) Marshal(buf []byte, opts *MarshalOptions) ([]byte, error) {
+ stb := b.strings
+ if stb == nil {
+ // Assume that most types are named. This makes encoding large BTF like
+ // vmlinux a lot cheaper.
+ stb = newStringTableBuilder(len(b.types))
+ } else {
+ // Avoid modifying the Builder's string table.
+ stb = b.strings.Copy()
+ }
+
+ if opts == nil {
+ opts = &MarshalOptions{Order: internal.NativeEndian}
+ }
+
+ // Reserve space for the BTF header.
+ buf = slices.Grow(buf, btfHeaderLen)[:btfHeaderLen]
+
+ w := internal.NewBuffer(buf)
+ defer internal.PutBuffer(w)
+
+ e := encoder{
+ MarshalOptions: *opts,
+ buf: w,
+ strings: stb,
+ lastID: TypeID(len(b.types)),
+ ids: make(map[Type]TypeID, len(b.types)),
+ }
+
+ // Ensure that types are marshaled in the exact order they were Add()ed.
+ // Otherwise the ID returned from Add() won't match.
+ e.pending.Grow(len(b.types))
+ for _, typ := range b.types {
+ e.pending.Push(typ)
+ e.ids[typ] = b.stableIDs[typ]
+ }
+
+ if err := e.deflatePending(); err != nil {
+ return nil, err
+ }
+
+ length := e.buf.Len()
+ typeLen := uint32(length - btfHeaderLen)
+
+ stringLen := e.strings.Length()
+ buf = e.strings.AppendEncoded(e.buf.Bytes())
+
+ // Fill out the header, and write it out.
+ header := &btfHeader{
+ Magic: btfMagic,
+ Version: 1,
+ Flags: 0,
+ HdrLen: uint32(btfHeaderLen),
+ TypeOff: 0,
+ TypeLen: typeLen,
+ StringOff: typeLen,
+ StringLen: uint32(stringLen),
+ }
+
+ err := binary.Write(sliceWriter(buf[:btfHeaderLen]), e.Order, header)
+ if err != nil {
+ return nil, fmt.Errorf("write header: %v", err)
+ }
+
+ return buf, nil
+}
+
+// addString adds a string to the resulting BTF.
+//
+// Adding the same string multiple times will return the same result.
+//
+// Returns an identifier into the string table or an error if the string
+// contains invalid characters.
+func (b *Builder) addString(str string) (uint32, error) {
+ if b.strings == nil {
+ b.strings = newStringTableBuilder(0)
+ }
+
+ return b.strings.Add(str)
+}
+
+func (e *encoder) allocateID(typ Type) error {
+ id := e.lastID + 1
+ if id < e.lastID {
+ return errors.New("type ID overflow")
+ }
+
+ e.pending.Push(typ)
+ e.ids[typ] = id
+ e.lastID = id
+ return nil
+}
+
+// id returns the ID for the given type or panics with an error.
+func (e *encoder) id(typ Type) TypeID {
+ if _, ok := typ.(*Void); ok {
+ return 0
+ }
+
+ id, ok := e.ids[typ]
+ if !ok {
+ panic(fmt.Errorf("no ID for type %v", typ))
+ }
+
+ return id
+}
+
+func (e *encoder) deflatePending() error {
+ // Declare root outside of the loop to avoid repeated heap allocations.
+ var root Type
+ skip := func(t Type) (skip bool) {
+ if t == root {
+ // Force descending into the current root type even if it already
+ // has an ID. Otherwise we miss children of types that have their
+ // ID pre-allocated via Add.
+ return false
+ }
+
+ _, isVoid := t.(*Void)
+ _, alreadyEncoded := e.ids[t]
+ return isVoid || alreadyEncoded
+ }
+
+ for !e.pending.Empty() {
+ root = e.pending.Shift()
+
+ // Allocate IDs for all children of typ, including transitive dependencies.
+ iter := postorderTraversal(root, skip)
+ for iter.Next() {
+ if iter.Type == root {
+ // The iterator yields root at the end, do not allocate another ID.
+ break
+ }
+
+ if err := e.allocateID(iter.Type); err != nil {
+ return err
+ }
+ }
+
+ if err := e.deflateType(root); err != nil {
+ id := e.ids[root]
+ return fmt.Errorf("deflate %v with ID %d: %w", root, id, err)
+ }
+ }
+
+ return nil
+}
+
+func (e *encoder) deflateType(typ Type) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ var ok bool
+ err, ok = r.(error)
+ if !ok {
+ panic(r)
+ }
+ }
+ }()
+
+ var raw rawType
+ raw.NameOff, err = e.strings.Add(typ.TypeName())
+ if err != nil {
+ return err
+ }
+
+ switch v := typ.(type) {
+ case *Void:
+ return errors.New("Void is implicit in BTF wire format")
+
+ case *Int:
+ raw.SetKind(kindInt)
+ raw.SetSize(v.Size)
+
+ var bi btfInt
+ bi.SetEncoding(v.Encoding)
+ // We need to set bits in addition to size, since btf_type_int_is_regular
+ // otherwise flags this as a bitfield.
+ bi.SetBits(byte(v.Size) * 8)
+ raw.data = bi
+
+ case *Pointer:
+ raw.SetKind(kindPointer)
+ raw.SetType(e.id(v.Target))
+
+ case *Array:
+ raw.SetKind(kindArray)
+ raw.data = &btfArray{
+ e.id(v.Type),
+ e.id(v.Index),
+ v.Nelems,
+ }
+
+ case *Struct:
+ raw.SetKind(kindStruct)
+ raw.SetSize(v.Size)
+ raw.data, err = e.convertMembers(&raw.btfType, v.Members)
+
+ case *Union:
+ err = e.deflateUnion(&raw, v)
+
+ case *Enum:
+ if v.Size == 8 {
+ err = e.deflateEnum64(&raw, v)
+ } else {
+ err = e.deflateEnum(&raw, v)
+ }
+
+ case *Fwd:
+ raw.SetKind(kindForward)
+ raw.SetFwdKind(v.Kind)
+
+ case *Typedef:
+ raw.SetKind(kindTypedef)
+ raw.SetType(e.id(v.Type))
+
+ case *Volatile:
+ raw.SetKind(kindVolatile)
+ raw.SetType(e.id(v.Type))
+
+ case *Const:
+ raw.SetKind(kindConst)
+ raw.SetType(e.id(v.Type))
+
+ case *Restrict:
+ raw.SetKind(kindRestrict)
+ raw.SetType(e.id(v.Type))
+
+ case *Func:
+ raw.SetKind(kindFunc)
+ raw.SetType(e.id(v.Type))
+ if !e.StripFuncLinkage {
+ raw.SetLinkage(v.Linkage)
+ }
+
+ case *FuncProto:
+ raw.SetKind(kindFuncProto)
+ raw.SetType(e.id(v.Return))
+ raw.SetVlen(len(v.Params))
+ raw.data, err = e.deflateFuncParams(v.Params)
+
+ case *Var:
+ raw.SetKind(kindVar)
+ raw.SetType(e.id(v.Type))
+ raw.data = btfVariable{uint32(v.Linkage)}
+
+ case *Datasec:
+ raw.SetKind(kindDatasec)
+ raw.SetSize(v.Size)
+ raw.SetVlen(len(v.Vars))
+ raw.data = e.deflateVarSecinfos(v.Vars)
+
+ case *Float:
+ raw.SetKind(kindFloat)
+ raw.SetSize(v.Size)
+
+ case *declTag:
+ raw.SetKind(kindDeclTag)
+ raw.SetType(e.id(v.Type))
+ raw.data = &btfDeclTag{uint32(v.Index)}
+ raw.NameOff, err = e.strings.Add(v.Value)
+
+ case *typeTag:
+ raw.SetKind(kindTypeTag)
+ raw.SetType(e.id(v.Type))
+ raw.NameOff, err = e.strings.Add(v.Value)
+
+ default:
+ return fmt.Errorf("don't know how to deflate %T", v)
+ }
+
+ if err != nil {
+ return err
+ }
+
+ return raw.Marshal(e.buf, e.Order)
+}
+
+func (e *encoder) deflateUnion(raw *rawType, union *Union) (err error) {
+ raw.SetKind(kindUnion)
+ raw.SetSize(union.Size)
+ raw.data, err = e.convertMembers(&raw.btfType, union.Members)
+ return
+}
+
+func (e *encoder) convertMembers(header *btfType, members []Member) ([]btfMember, error) {
+ bms := make([]btfMember, 0, len(members))
+ isBitfield := false
+ for _, member := range members {
+ isBitfield = isBitfield || member.BitfieldSize > 0
+
+ offset := member.Offset
+ if isBitfield {
+ offset = member.BitfieldSize<<24 | (member.Offset & 0xffffff)
+ }
+
+ nameOff, err := e.strings.Add(member.Name)
+ if err != nil {
+ return nil, err
+ }
+
+ bms = append(bms, btfMember{
+ nameOff,
+ e.id(member.Type),
+ uint32(offset),
+ })
+ }
+
+ header.SetVlen(len(members))
+ header.SetBitfield(isBitfield)
+ return bms, nil
+}
+
+func (e *encoder) deflateEnum(raw *rawType, enum *Enum) (err error) {
+ raw.SetKind(kindEnum)
+ raw.SetSize(enum.Size)
+ raw.SetVlen(len(enum.Values))
+ // Signedness appeared together with ENUM64 support.
+ raw.SetSigned(enum.Signed && !e.ReplaceEnum64)
+ raw.data, err = e.deflateEnumValues(enum)
+ return
+}
+
+func (e *encoder) deflateEnumValues(enum *Enum) ([]btfEnum, error) {
+ bes := make([]btfEnum, 0, len(enum.Values))
+ for _, value := range enum.Values {
+ nameOff, err := e.strings.Add(value.Name)
+ if err != nil {
+ return nil, err
+ }
+
+ if enum.Signed {
+ if signedValue := int64(value.Value); signedValue < math.MinInt32 || signedValue > math.MaxInt32 {
+ return nil, fmt.Errorf("value %d of enum %q exceeds 32 bits", signedValue, value.Name)
+ }
+ } else {
+ if value.Value > math.MaxUint32 {
+ return nil, fmt.Errorf("value %d of enum %q exceeds 32 bits", value.Value, value.Name)
+ }
+ }
+
+ bes = append(bes, btfEnum{
+ nameOff,
+ uint32(value.Value),
+ })
+ }
+
+ return bes, nil
+}
+
+func (e *encoder) deflateEnum64(raw *rawType, enum *Enum) (err error) {
+ if e.ReplaceEnum64 {
+ // Replace the ENUM64 with a union of fields with the correct size.
+ // This matches libbpf behaviour on purpose.
+ placeholder := &Int{
+ "enum64_placeholder",
+ enum.Size,
+ Unsigned,
+ }
+ if enum.Signed {
+ placeholder.Encoding = Signed
+ }
+ if err := e.allocateID(placeholder); err != nil {
+ return fmt.Errorf("add enum64 placeholder: %w", err)
+ }
+
+ members := make([]Member, 0, len(enum.Values))
+ for _, v := range enum.Values {
+ members = append(members, Member{
+ Name: v.Name,
+ Type: placeholder,
+ })
+ }
+
+ return e.deflateUnion(raw, &Union{enum.Name, enum.Size, members})
+ }
+
+ raw.SetKind(kindEnum64)
+ raw.SetSize(enum.Size)
+ raw.SetVlen(len(enum.Values))
+ raw.SetSigned(enum.Signed)
+ raw.data, err = e.deflateEnum64Values(enum.Values)
+ return
+}
+
+func (e *encoder) deflateEnum64Values(values []EnumValue) ([]btfEnum64, error) {
+ bes := make([]btfEnum64, 0, len(values))
+ for _, value := range values {
+ nameOff, err := e.strings.Add(value.Name)
+ if err != nil {
+ return nil, err
+ }
+
+ bes = append(bes, btfEnum64{
+ nameOff,
+ uint32(value.Value),
+ uint32(value.Value >> 32),
+ })
+ }
+
+ return bes, nil
+}
+
+func (e *encoder) deflateFuncParams(params []FuncParam) ([]btfParam, error) {
+ bps := make([]btfParam, 0, len(params))
+ for _, param := range params {
+ nameOff, err := e.strings.Add(param.Name)
+ if err != nil {
+ return nil, err
+ }
+
+ bps = append(bps, btfParam{
+ nameOff,
+ e.id(param.Type),
+ })
+ }
+ return bps, nil
+}
+
+func (e *encoder) deflateVarSecinfos(vars []VarSecinfo) []btfVarSecinfo {
+ vsis := make([]btfVarSecinfo, 0, len(vars))
+ for _, v := range vars {
+ vsis = append(vsis, btfVarSecinfo{
+ e.id(v.Type),
+ v.Offset,
+ v.Size,
+ })
+ }
+ return vsis
+}
+
+// MarshalMapKV creates a BTF object containing a map key and value.
+//
+// The function is intended for the use of the ebpf package and may be removed
+// at any point in time.
+func MarshalMapKV(key, value Type) (_ *Handle, keyID, valueID TypeID, err error) {
+ var b Builder
+
+ if key != nil {
+ keyID, err = b.Add(key)
+ if err != nil {
+ return nil, 0, 0, fmt.Errorf("add key type: %w", err)
+ }
+ }
+
+ if value != nil {
+ valueID, err = b.Add(value)
+ if err != nil {
+ return nil, 0, 0, fmt.Errorf("add value type: %w", err)
+ }
+ }
+
+ handle, err := NewHandle(&b)
+ if err != nil {
+ // Check for 'full' map BTF support, since kernels between 4.18 and 5.2
+ // already support BTF blobs for maps without Var or Datasec just fine.
+ if err := haveMapBTF(); err != nil {
+ return nil, 0, 0, err
+ }
+ }
+ return handle, keyID, valueID, err
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/strings.go b/vendor/github.com/cilium/ebpf/btf/strings.go
new file mode 100644
index 000000000..0ddf1d24f
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/strings.go
@@ -0,0 +1,195 @@
+package btf
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+
+ "golang.org/x/exp/maps"
+ "golang.org/x/exp/slices"
+)
+
+type stringTable struct {
+ base *stringTable
+ offsets []uint32
+ strings []string
+}
+
+// sizedReader is implemented by bytes.Reader, io.SectionReader, strings.Reader, etc.
+type sizedReader interface {
+ io.Reader
+ Size() int64
+}
+
+// readStringTable parses a BTF string section from r into a stringTable.
+//
+// The section is a sequence of null-terminated strings; offsets and strings
+// are stored in two parallel slices so lookups can binary search offsets.
+// base, if non-nil, is the string table of the base BTF this split BTF
+// extends; it shifts all parsed offsets past the end of the base table.
+func readStringTable(r sizedReader, base *stringTable) (*stringTable, error) {
+ // When parsing split BTF's string table, the first entry offset is derived
+ // from the last entry offset of the base BTF.
+ firstStringOffset := uint32(0)
+ if base != nil {
+ idx := len(base.offsets) - 1
+ firstStringOffset = base.offsets[idx] + uint32(len(base.strings[idx])) + 1
+ }
+
+ // Derived from vmlinux BTF.
+ const averageStringLength = 16
+
+ n := int(r.Size() / averageStringLength)
+ offsets := make([]uint32, 0, n)
+ strings := make([]string, 0, n)
+
+ offset := firstStringOffset
+ scanner := bufio.NewScanner(r)
+ scanner.Split(splitNull)
+ for scanner.Scan() {
+ str := scanner.Text()
+ offsets = append(offsets, offset)
+ strings = append(strings, str)
+ // +1 accounts for the null terminator.
+ offset += uint32(len(str)) + 1
+ }
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+
+ if len(strings) == 0 {
+ return nil, errors.New("string table is empty")
+ }
+
+ // A non-split table must start with the empty string at offset 0.
+ if firstStringOffset == 0 && strings[0] != "" {
+ return nil, errors.New("first item in string table is non-empty")
+ }
+
+ return &stringTable{base, offsets, strings}, nil
+}
+
+// splitNull is a bufio.SplitFunc that tokenizes null-terminated strings.
+// The returned token excludes the terminator; trailing bytes without a
+// terminator at EOF are an error.
+func splitNull(data []byte, atEOF bool) (advance int, token []byte, err error) {
+ i := bytes.IndexByte(data, 0)
+ if i == -1 {
+ if atEOF && len(data) > 0 {
+ return 0, nil, errors.New("string table isn't null terminated")
+ }
+ // Request more data.
+ return 0, nil, nil
+ }
+
+ return i + 1, data[:i], nil
+}
+
+// Lookup returns the string at the given offset, delegating to the base
+// table when the offset falls within the base BTF's string section.
+func (st *stringTable) Lookup(offset uint32) (string, error) {
+ if st.base != nil && offset <= st.base.offsets[len(st.base.offsets)-1] {
+ return st.base.lookup(offset)
+ }
+ return st.lookup(offset)
+}
+
+// lookup binary searches st.offsets for an exact match; offsets pointing
+// into the middle of a string are rejected.
+func (st *stringTable) lookup(offset uint32) (string, error) {
+ i, found := slices.BinarySearch(st.offsets, offset)
+ if !found {
+ return "", fmt.Errorf("offset %d isn't start of a string", offset)
+ }
+
+ return st.strings[i], nil
+}
+
+// Marshal writes the string table to w in its on-disk format: each string
+// followed by a single null terminator.
+func (st *stringTable) Marshal(w io.Writer) error {
+ for _, str := range st.strings {
+ _, err := io.WriteString(w, str)
+ if err != nil {
+ return err
+ }
+ _, err = w.Write([]byte{0})
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Num returns the number of strings in the table.
+func (st *stringTable) Num() int {
+ return len(st.strings)
+}
+
+// stringTableBuilder builds BTF string tables.
+type stringTableBuilder struct {
+ length uint32
+ strings map[string]uint32
+}
+
+// newStringTableBuilder creates a builder with the given capacity.
+//
+// capacity may be zero.
+//
+// The builder always contains the empty string at offset 0, as required by
+// the BTF string section format.
+func newStringTableBuilder(capacity int) *stringTableBuilder {
+ var stb stringTableBuilder
+
+ if capacity == 0 {
+ // Use the runtime's small default size.
+ stb.strings = make(map[string]uint32)
+ } else {
+ stb.strings = make(map[string]uint32, capacity)
+ }
+
+ // Ensure that the empty string is at index 0.
+ stb.append("")
+ return &stb
+}
+
+// Add a string to the table.
+//
+// Adding the same string multiple times will only store it once.
+//
+// Returns the string's offset in the table. Strings containing an embedded
+// null byte are rejected since null is the on-disk terminator.
+func (stb *stringTableBuilder) Add(str string) (uint32, error) {
+ if strings.IndexByte(str, 0) != -1 {
+ return 0, fmt.Errorf("string contains null: %q", str)
+ }
+
+ // Deduplicate: return the existing offset if already present.
+ offset, ok := stb.strings[str]
+ if ok {
+ return offset, nil
+ }
+
+ return stb.append(str), nil
+}
+
+// append unconditionally stores str at the current end of the table and
+// returns its offset. Callers must have checked for duplicates and embedded
+// nulls. The +1 accounts for the null terminator.
+func (stb *stringTableBuilder) append(str string) uint32 {
+ offset := stb.length
+ stb.length += uint32(len(str)) + 1
+ stb.strings[str] = offset
+ return offset
+}
+
+// Lookup finds the offset of a string in the table.
+//
+// Returns an error if str hasn't been added yet.
+func (stb *stringTableBuilder) Lookup(str string) (uint32, error) {
+ offset, ok := stb.strings[str]
+ if !ok {
+ return 0, fmt.Errorf("string %q is not in table", str)
+ }
+
+ return offset, nil
+}
+
+// Length returns the length in bytes.
+func (stb *stringTableBuilder) Length() int {
+ return int(stb.length)
+}
+
+// AppendEncoded appends the string table to the end of the provided buffer.
+//
+// Map iteration order doesn't matter here: each string is copied to its
+// recorded offset, and the null terminators come from the zeroed extension.
+func (stb *stringTableBuilder) AppendEncoded(buf []byte) []byte {
+ n := len(buf)
+ buf = append(buf, make([]byte, stb.Length())...)
+ strings := buf[n:]
+ for str, offset := range stb.strings {
+ copy(strings[offset:], str)
+ }
+ return buf
+}
+
+// Copy the string table builder.
+func (stb *stringTableBuilder) Copy() *stringTableBuilder {
+ return &stringTableBuilder{
+ stb.length,
+ maps.Clone(stb.strings),
+ }
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/traversal.go b/vendor/github.com/cilium/ebpf/btf/traversal.go
new file mode 100644
index 000000000..a3a9dec94
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/traversal.go
@@ -0,0 +1,141 @@
+package btf
+
+import (
+ "fmt"
+
+ "github.com/cilium/ebpf/internal"
+)
+
+// Functions to traverse a cyclic graph of types. The below was very useful:
+// https://eli.thegreenplace.net/2015/directed-graph-traversal-orderings-and-applications-to-data-flow-analysis/#post-order-and-reverse-post-order
+
+type postorderIterator struct {
+ // Iteration skips types for which this function returns true.
+ skip func(Type) bool
+ // The root type. May be nil if skip(root) is true.
+ root Type
+
+ // Contains types which need to be either walked or yielded.
+ types typeDeque
+ // Contains a boolean whether the type has been walked or not.
+ walked internal.Deque[bool]
+ // The set of types which has been pushed onto types.
+ pushed map[Type]struct{}
+
+ // The current type. Only valid after a call to Next().
+ Type Type
+}
+
+// postorderTraversal iterates all types reachable from root by visiting the
+// leaves of the graph first.
+//
+// Types for which skip returns true are ignored. skip may be nil.
+//
+// The returned iterator is driven by calling Next(); root itself is yielded
+// last (see Next).
+func postorderTraversal(root Type, skip func(Type) (skip bool)) postorderIterator {
+ // Avoid allocations for the common case of a skipped root.
+ if skip != nil && skip(root) {
+ return postorderIterator{}
+ }
+
+ po := postorderIterator{root: root, skip: skip}
+ // Seed the work list with root's direct children; root is handled
+ // separately by Next so it is emitted exactly once, at the end.
+ walkType(root, po.push)
+
+ return po
+}
+
+// push queues *t for traversal unless it was already pushed, is the root
+// (yielded separately by Next), or is skipped by po.skip.
+func (po *postorderIterator) push(t *Type) {
+ if _, ok := po.pushed[*t]; ok || *t == po.root {
+ return
+ }
+
+ if po.skip != nil && po.skip(*t) {
+ return
+ }
+
+ if po.pushed == nil {
+ // Lazily allocate pushed to avoid an allocation for Types without children.
+ po.pushed = make(map[Type]struct{})
+ }
+
+ po.pushed[*t] = struct{}{}
+ po.types.Push(t)
+ // walked is kept in lockstep with types: false means the entry still
+ // needs its children expanded before it can be yielded.
+ po.walked.Push(false)
+}
+
+// Next returns true if there is another Type to traverse.
+//
+// On true, po.Type holds the current type. Each type is yielded only after
+// all of its children have been yielded (post-order); root comes last.
+func (po *postorderIterator) Next() bool {
+ for !po.types.Empty() {
+ t := po.types.Pop()
+
+ if !po.walked.Pop() {
+ // Push the type again, so that we re-evaluate it in done state
+ // after all children have been handled.
+ po.types.Push(t)
+ po.walked.Push(true)
+
+ // Add all direct children to todo.
+ walkType(*t, po.push)
+ } else {
+ // We've walked this type previously, so we now know that all
+ // children have been handled.
+ po.Type = *t
+ return true
+ }
+ }
+
+ // Only return root once.
+ po.Type, po.root = po.root, nil
+ return po.Type != nil
+}
+
+// walkType calls fn on each child of typ.
+//
+// fn receives a pointer into typ's own fields, so it may replace the child
+// in place (copier.copy relies on this).
+func walkType(typ Type, fn func(*Type)) {
+ // Explicitly type switch on the most common types to allow the inliner to
+ // do its work. This avoids allocating intermediate slices from walk() on
+ // the heap.
+ switch v := typ.(type) {
+ case *Void, *Int, *Enum, *Fwd, *Float:
+ // No children to traverse.
+ case *Pointer:
+ fn(&v.Target)
+ case *Array:
+ fn(&v.Index)
+ fn(&v.Type)
+ case *Struct:
+ for i := range v.Members {
+ fn(&v.Members[i].Type)
+ }
+ case *Union:
+ for i := range v.Members {
+ fn(&v.Members[i].Type)
+ }
+ case *Typedef:
+ fn(&v.Type)
+ case *Volatile:
+ fn(&v.Type)
+ case *Const:
+ fn(&v.Type)
+ case *Restrict:
+ fn(&v.Type)
+ case *Func:
+ fn(&v.Type)
+ case *FuncProto:
+ fn(&v.Return)
+ for i := range v.Params {
+ fn(&v.Params[i].Type)
+ }
+ case *Var:
+ fn(&v.Type)
+ case *Datasec:
+ for i := range v.Vars {
+ fn(&v.Vars[i].Type)
+ }
+ case *declTag:
+ fn(&v.Type)
+ case *typeTag:
+ fn(&v.Type)
+ case *cycle:
+ // cycle has children, but we ignore them deliberately.
+ default:
+ panic(fmt.Sprintf("don't know how to walk Type %T", v))
+ }
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/types.go b/vendor/github.com/cilium/ebpf/btf/types.go
new file mode 100644
index 000000000..5aedd72d8
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/types.go
@@ -0,0 +1,1243 @@
+package btf
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "strings"
+
+ "github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+)
+
+const maxTypeDepth = 32
+
+// TypeID identifies a type in a BTF section.
+type TypeID = sys.TypeID
+
+// Type represents a type described by BTF.
+//
+// Identity of Type follows the [Go specification]: two Types are considered
+// equal if they have the same concrete type and the same dynamic value, aka
+// they point at the same location in memory. This means that the following
+// Types are considered distinct even though they have the same "shape".
+//
+// a := &Int{Size: 1}
+// b := &Int{Size: 1}
+// a != b
+//
+// [Go specification]: https://go.dev/ref/spec#Comparison_operators
+type Type interface {
+ // Type can be formatted using the %s and %v verbs. %s outputs only the
+ // identity of the type, without any detail. %v outputs additional detail.
+ //
+ // Use the '+' flag to include the address of the type.
+ //
+ // Use the width to specify how many levels of detail to output, for example
+ // %1v will output detail for the root type and a short description of its
+ // children. %2v would output details of the root type and its children
+ // as well as a short description of the grandchildren.
+ fmt.Formatter
+
+ // Name of the type, empty for anonymous types and types that cannot
+ // carry a name, like Void and Pointer.
+ TypeName() string
+
+ // Make a copy of the type, without copying Type members.
+ copy() Type
+
+ // New implementations must update walkType.
+}
+
+var (
+ _ Type = (*Int)(nil)
+ _ Type = (*Struct)(nil)
+ _ Type = (*Union)(nil)
+ _ Type = (*Enum)(nil)
+ _ Type = (*Fwd)(nil)
+ _ Type = (*Func)(nil)
+ _ Type = (*Typedef)(nil)
+ _ Type = (*Var)(nil)
+ _ Type = (*Datasec)(nil)
+ _ Type = (*Float)(nil)
+ _ Type = (*declTag)(nil)
+ _ Type = (*typeTag)(nil)
+ _ Type = (*cycle)(nil)
+)
+
+// Void is the unit type of BTF.
+type Void struct{}
+
+func (v *Void) Format(fs fmt.State, verb rune) { formatType(fs, verb, v) }
+func (v *Void) TypeName() string { return "" }
+func (v *Void) size() uint32 { return 0 }
+func (v *Void) copy() Type { return (*Void)(nil) }
+
+type IntEncoding byte
+
+// Valid IntEncodings.
+//
+// These may look like they are flags, but they aren't.
+const (
+ Unsigned IntEncoding = 0
+ Signed IntEncoding = 1
+ Char IntEncoding = 2
+ Bool IntEncoding = 4
+)
+
+func (ie IntEncoding) String() string {
+ switch ie {
+ case Char:
+ // NB: There is no way to determine signedness for char.
+ return "char"
+ case Bool:
+ return "bool"
+ case Signed:
+ return "signed"
+ case Unsigned:
+ return "unsigned"
+ default:
+ return fmt.Sprintf("IntEncoding(%d)", byte(ie))
+ }
+}
+
+// Int is an integer of a given length.
+//
+// See https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-int
+type Int struct {
+ Name string
+
+ // The size of the integer in bytes.
+ Size uint32
+ Encoding IntEncoding
+}
+
+func (i *Int) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, i, i.Encoding, "size=", i.Size*8)
+}
+
+func (i *Int) TypeName() string { return i.Name }
+func (i *Int) size() uint32 { return i.Size }
+func (i *Int) copy() Type {
+ cpy := *i
+ return &cpy
+}
+
+// Pointer is a pointer to another type.
+type Pointer struct {
+ Target Type
+}
+
+func (p *Pointer) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, p, "target=", p.Target)
+}
+
+func (p *Pointer) TypeName() string { return "" }
+func (p *Pointer) size() uint32 { return 8 }
+func (p *Pointer) copy() Type {
+ cpy := *p
+ return &cpy
+}
+
+// Array is an array with a fixed number of elements.
+type Array struct {
+ Index Type
+ Type Type
+ Nelems uint32
+}
+
+func (arr *Array) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, arr, "index=", arr.Index, "type=", arr.Type, "n=", arr.Nelems)
+}
+
+func (arr *Array) TypeName() string { return "" }
+
+func (arr *Array) copy() Type {
+ cpy := *arr
+ return &cpy
+}
+
+// Struct is a compound type of consecutive members.
+type Struct struct {
+ Name string
+ // The size of the struct including padding, in bytes
+ Size uint32
+ Members []Member
+}
+
+func (s *Struct) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, s, "fields=", len(s.Members))
+}
+
+func (s *Struct) TypeName() string { return s.Name }
+
+func (s *Struct) size() uint32 { return s.Size }
+
+func (s *Struct) copy() Type {
+ cpy := *s
+ cpy.Members = copyMembers(s.Members)
+ return &cpy
+}
+
+func (s *Struct) members() []Member {
+ return s.Members
+}
+
+// Union is a compound type where members occupy the same memory.
+type Union struct {
+ Name string
+ // The size of the union including padding, in bytes.
+ Size uint32
+ Members []Member
+}
+
+func (u *Union) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, u, "fields=", len(u.Members))
+}
+
+func (u *Union) TypeName() string { return u.Name }
+
+func (u *Union) size() uint32 { return u.Size }
+
+func (u *Union) copy() Type {
+ cpy := *u
+ cpy.Members = copyMembers(u.Members)
+ return &cpy
+}
+
+func (u *Union) members() []Member {
+ return u.Members
+}
+
+func copyMembers(orig []Member) []Member {
+ cpy := make([]Member, len(orig))
+ copy(cpy, orig)
+ return cpy
+}
+
+type composite interface {
+ Type
+ members() []Member
+}
+
+var (
+ _ composite = (*Struct)(nil)
+ _ composite = (*Union)(nil)
+)
+
+// A value in bits.
+type Bits uint32
+
+// Bytes converts a bit value into bytes.
+func (b Bits) Bytes() uint32 {
+ return uint32(b / 8)
+}
+
+// Member is part of a Struct or Union.
+//
+// It is not a valid Type.
+type Member struct {
+ Name string
+ Type Type
+ Offset Bits
+ BitfieldSize Bits
+}
+
+// Enum lists possible values.
+type Enum struct {
+ Name string
+ // Size of the enum value in bytes.
+ Size uint32
+ // True if the values should be interpreted as signed integers.
+ Signed bool
+ Values []EnumValue
+}
+
+func (e *Enum) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, e, "size=", e.Size, "values=", len(e.Values))
+}
+
+func (e *Enum) TypeName() string { return e.Name }
+
+// EnumValue is part of an Enum.
+//
+// It is not a valid Type.
+type EnumValue struct {
+ Name string
+ Value uint64
+}
+
+func (e *Enum) size() uint32 { return e.Size }
+func (e *Enum) copy() Type {
+ cpy := *e
+ cpy.Values = make([]EnumValue, len(e.Values))
+ copy(cpy.Values, e.Values)
+ return &cpy
+}
+
+// FwdKind is the type of forward declaration.
+type FwdKind int
+
+// Valid types of forward declaration.
+const (
+ FwdStruct FwdKind = iota
+ FwdUnion
+)
+
+func (fk FwdKind) String() string {
+ switch fk {
+ case FwdStruct:
+ return "struct"
+ case FwdUnion:
+ return "union"
+ default:
+ return fmt.Sprintf("%T(%d)", fk, int(fk))
+ }
+}
+
+// Fwd is a forward declaration of a Type.
+type Fwd struct {
+ Name string
+ Kind FwdKind
+}
+
+func (f *Fwd) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, f, f.Kind)
+}
+
+func (f *Fwd) TypeName() string { return f.Name }
+
+func (f *Fwd) copy() Type {
+ cpy := *f
+ return &cpy
+}
+
+// Typedef is an alias of a Type.
+type Typedef struct {
+ Name string
+ Type Type
+}
+
+func (td *Typedef) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, td, td.Type)
+}
+
+func (td *Typedef) TypeName() string { return td.Name }
+
+func (td *Typedef) copy() Type {
+ cpy := *td
+ return &cpy
+}
+
+// Volatile is a qualifier.
+type Volatile struct {
+ Type Type
+}
+
+func (v *Volatile) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, v, v.Type)
+}
+
+func (v *Volatile) TypeName() string { return "" }
+
+func (v *Volatile) qualify() Type { return v.Type }
+func (v *Volatile) copy() Type {
+ cpy := *v
+ return &cpy
+}
+
+// Const is a qualifier.
+type Const struct {
+ Type Type
+}
+
+func (c *Const) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, c, c.Type)
+}
+
+func (c *Const) TypeName() string { return "" }
+
+func (c *Const) qualify() Type { return c.Type }
+func (c *Const) copy() Type {
+ cpy := *c
+ return &cpy
+}
+
+// Restrict is a qualifier.
+type Restrict struct {
+ Type Type
+}
+
+func (r *Restrict) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, r, r.Type)
+}
+
+func (r *Restrict) TypeName() string { return "" }
+
+func (r *Restrict) qualify() Type { return r.Type }
+func (r *Restrict) copy() Type {
+ cpy := *r
+ return &cpy
+}
+
+// Func is a function definition.
+type Func struct {
+ Name string
+ Type Type
+ Linkage FuncLinkage
+}
+
+func FuncMetadata(ins *asm.Instruction) *Func {
+ fn, _ := ins.Metadata.Get(funcInfoMeta{}).(*Func)
+ return fn
+}
+
+// WithFuncMetadata adds a btf.Func to the Metadata of asm.Instruction.
+func WithFuncMetadata(ins asm.Instruction, fn *Func) asm.Instruction {
+ ins.Metadata.Set(funcInfoMeta{}, fn)
+ return ins
+}
+
+func (f *Func) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, f, f.Linkage, "proto=", f.Type)
+}
+
+func (f *Func) TypeName() string { return f.Name }
+
+func (f *Func) copy() Type {
+ cpy := *f
+ return &cpy
+}
+
+// FuncProto is a function declaration.
+type FuncProto struct {
+ Return Type
+ Params []FuncParam
+}
+
+func (fp *FuncProto) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, fp, "args=", len(fp.Params), "return=", fp.Return)
+}
+
+func (fp *FuncProto) TypeName() string { return "" }
+
+func (fp *FuncProto) copy() Type {
+ cpy := *fp
+ cpy.Params = make([]FuncParam, len(fp.Params))
+ copy(cpy.Params, fp.Params)
+ return &cpy
+}
+
+type FuncParam struct {
+ Name string
+ Type Type
+}
+
+// Var is a global variable.
+type Var struct {
+ Name string
+ Type Type
+ Linkage VarLinkage
+}
+
+func (v *Var) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, v, v.Linkage)
+}
+
+func (v *Var) TypeName() string { return v.Name }
+
+func (v *Var) copy() Type {
+ cpy := *v
+ return &cpy
+}
+
+// Datasec is a global program section containing data.
+type Datasec struct {
+ Name string
+ Size uint32
+ Vars []VarSecinfo
+}
+
+func (ds *Datasec) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, ds)
+}
+
+func (ds *Datasec) TypeName() string { return ds.Name }
+
+func (ds *Datasec) size() uint32 { return ds.Size }
+
+func (ds *Datasec) copy() Type {
+ cpy := *ds
+ cpy.Vars = make([]VarSecinfo, len(ds.Vars))
+ copy(cpy.Vars, ds.Vars)
+ return &cpy
+}
+
+// VarSecinfo describes a variable in a Datasec.
+//
+// It is not a valid Type.
+type VarSecinfo struct {
+ // Var or Func.
+ Type Type
+ Offset uint32
+ Size uint32
+}
+
+// Float is a float of a given length.
+type Float struct {
+ Name string
+
+ // The size of the float in bytes.
+ Size uint32
+}
+
+func (f *Float) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, f, "size=", f.Size*8)
+}
+
+func (f *Float) TypeName() string { return f.Name }
+func (f *Float) size() uint32 { return f.Size }
+func (f *Float) copy() Type {
+ cpy := *f
+ return &cpy
+}
+
+// declTag associates metadata with a declaration.
+type declTag struct {
+ Type Type
+ Value string
+ // The index this tag refers to in the target type. For composite types,
+ // a value of -1 indicates that the tag refers to the whole type. Otherwise
+ // it indicates which member or argument the tag applies to.
+ Index int
+}
+
+func (dt *declTag) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, dt, "type=", dt.Type, "value=", dt.Value, "index=", dt.Index)
+}
+
+func (dt *declTag) TypeName() string { return "" }
+func (dt *declTag) copy() Type {
+ cpy := *dt
+ return &cpy
+}
+
+// typeTag associates metadata with a type.
+type typeTag struct {
+ Type Type
+ Value string
+}
+
+func (tt *typeTag) Format(fs fmt.State, verb rune) {
+ formatType(fs, verb, tt, "type=", tt.Type, "value=", tt.Value)
+}
+
+func (tt *typeTag) TypeName() string { return "" }
+func (tt *typeTag) qualify() Type { return tt.Type }
+func (tt *typeTag) copy() Type {
+ cpy := *tt
+ return &cpy
+}
+
+// cycle is a type which had to be elided since it exceeded maxTypeDepth.
+type cycle struct {
+ root Type
+}
+
+func (c *cycle) ID() TypeID { return math.MaxUint32 }
+func (c *cycle) Format(fs fmt.State, verb rune) { formatType(fs, verb, c, "root=", c.root) }
+func (c *cycle) TypeName() string { return "" }
+func (c *cycle) copy() Type {
+ cpy := *c
+ return &cpy
+}
+
+type sizer interface {
+ size() uint32
+}
+
+var (
+ _ sizer = (*Int)(nil)
+ _ sizer = (*Pointer)(nil)
+ _ sizer = (*Struct)(nil)
+ _ sizer = (*Union)(nil)
+ _ sizer = (*Enum)(nil)
+ _ sizer = (*Datasec)(nil)
+)
+
+type qualifier interface {
+ qualify() Type
+}
+
+var (
+ _ qualifier = (*Const)(nil)
+ _ qualifier = (*Restrict)(nil)
+ _ qualifier = (*Volatile)(nil)
+ _ qualifier = (*typeTag)(nil)
+)
+
+var errUnsizedType = errors.New("type is unsized")
+
+// Sizeof returns the size of a type in bytes.
+//
+// Returns an error if the size can't be computed.
+//
+// Arrays, typedefs and qualifiers are peeled iteratively (bounded by
+// maxTypeDepth to guard against cycles); the result is the product of all
+// traversed array lengths and the element size, with overflow checks at
+// every multiplication.
+func Sizeof(typ Type) (int, error) {
+ var (
+ n = int64(1)
+ elem int64
+ )
+
+ for i := 0; i < maxTypeDepth; i++ {
+ switch v := typ.(type) {
+ case *Array:
+ if n > 0 && int64(v.Nelems) > math.MaxInt64/n {
+ return 0, fmt.Errorf("type %s: overflow", typ)
+ }
+
+ // Arrays may be of zero length, which allows
+ // n to be zero as well.
+ n *= int64(v.Nelems)
+ typ = v.Type
+ continue
+
+ case sizer:
+ elem = int64(v.size())
+
+ case *Typedef:
+ typ = v.Type
+ continue
+
+ case qualifier:
+ typ = v.qualify()
+ continue
+
+ default:
+ return 0, fmt.Errorf("type %T: %w", typ, errUnsizedType)
+ }
+
+ if n > 0 && elem > math.MaxInt64/n {
+ return 0, fmt.Errorf("type %s: overflow", typ)
+ }
+
+ size := n * elem
+ // Guard against truncation when int is 32 bits.
+ if int64(int(size)) != size {
+ return 0, fmt.Errorf("type %s: overflow", typ)
+ }
+
+ return int(size), nil
+ }
+
+ return 0, fmt.Errorf("type %s: exceeded type depth", typ)
+}
+
+// alignof returns the alignment of a type.
+//
+// Returns an error if the Type can't be aligned, like an integer with an uneven
+// size. Currently only supports the subset of types necessary for bitfield
+// relocations.
+func alignof(typ Type) (int, error) {
+ var n int
+
+ switch t := UnderlyingType(typ).(type) {
+ case *Enum:
+ n = int(t.size())
+ case *Int:
+ n = int(t.Size)
+ case *Array:
+ // Array alignment is the alignment of its element type.
+ return alignof(t.Type)
+ default:
+ return 0, fmt.Errorf("can't calculate alignment of %T", t)
+ }
+
+ if !pow(n) {
+ return 0, fmt.Errorf("alignment value %d is not a power of two", n)
+ }
+
+ return n, nil
+}
+
+// pow returns true if n is a power of two.
+func pow(n int) bool {
+ return n != 0 && (n&(n-1)) == 0
+}
+
+// Transformer modifies a given Type and returns the result.
+//
+// For example, UnderlyingType removes any qualifiers or typedefs from a type.
+// See the example on Copy for how to use a transform.
+type Transformer func(Type) Type
+
+// Copy a Type recursively.
+//
+// typ may form a cycle. If transform is not nil, it is called with the
+// to be copied type, and the returned value is copied instead.
+func Copy(typ Type, transform Transformer) Type {
+ copies := copier{copies: make(map[Type]Type)}
+ copies.copy(&typ, transform)
+ return typ
+}
+
+// copy a slice of Types recursively.
+//
+// See Copy for the semantics.
+func copyTypes(types []Type, transform Transformer) []Type {
+ result := make([]Type, len(types))
+ copy(result, types)
+
+ copies := copier{copies: make(map[Type]Type, len(types))}
+ for i := range result {
+ copies.copy(&result[i], transform)
+ }
+
+ return result
+}
+
+type copier struct {
+ copies map[Type]Type
+ work typeDeque
+}
+
+// copy replaces *typ (and, transitively, every reachable child) with a
+// copy, memoizing in c.copies so cycles and shared types map to a single
+// copy. Work is processed iteratively via c.work to avoid recursion.
+func (c *copier) copy(typ *Type, transform Transformer) {
+ for t := typ; t != nil; t = c.work.Pop() {
+ // *t is the identity of the type.
+ if cpy := c.copies[*t]; cpy != nil {
+ *t = cpy
+ continue
+ }
+
+ var cpy Type
+ if transform != nil {
+ cpy = transform(*t).copy()
+ } else {
+ cpy = (*t).copy()
+ }
+
+ c.copies[*t] = cpy
+ *t = cpy
+
+ // Mark any nested types for copying.
+ walkType(cpy, c.work.Push)
+ }
+}
+
+type typeDeque = internal.Deque[*Type]
+
+// inflateRawTypes takes a list of raw btf types linked via type IDs, and turns
+// it into a graph of Types connected via pointers.
+//
+// If base is provided, then the raw types are considered to be of a split BTF
+// (e.g., a kernel module).
+//
+// Returns a slice of types indexed by TypeID. Since BTF ignores compilation
+// units, multiple types may share the same name. A Type may form a cyclic graph
+// by pointing at itself.
+func inflateRawTypes(rawTypes []rawType, rawStrings *stringTable, base *Spec) ([]Type, error) {
+ types := make([]Type, 0, len(rawTypes)+1) // +1 for Void added to base types
+
+ // Void is defined to always be type ID 0, and is thus omitted from BTF.
+ types = append(types, (*Void)(nil))
+
+ firstTypeID := TypeID(0)
+ if base != nil {
+ var err error
+ firstTypeID, err = base.nextTypeID()
+ if err != nil {
+ return nil, err
+ }
+
+ // Split BTF doesn't contain Void.
+ types = types[:0]
+ }
+
+ type fixupDef struct {
+ id TypeID
+ typ *Type
+ }
+
+ var fixups []fixupDef
+ fixup := func(id TypeID, typ *Type) bool {
+ if id < firstTypeID {
+ if baseType, err := base.TypeByID(id); err == nil {
+ *typ = baseType
+ return true
+ }
+ }
+
+ idx := int(id - firstTypeID)
+ if idx < len(types) {
+ // We've already inflated this type, fix it up immediately.
+ *typ = types[idx]
+ return true
+ }
+
+ fixups = append(fixups, fixupDef{id, typ})
+ return false
+ }
+
+ type assertion struct {
+ id TypeID
+ typ *Type
+ want reflect.Type
+ }
+
+ var assertions []assertion
+ fixupAndAssert := func(id TypeID, typ *Type, want reflect.Type) error {
+ if !fixup(id, typ) {
+ assertions = append(assertions, assertion{id, typ, want})
+ return nil
+ }
+
+ // The type has already been fixed up, check the type immediately.
+ if reflect.TypeOf(*typ) != want {
+ return fmt.Errorf("type ID %d: expected %s, got %T", id, want, *typ)
+ }
+ return nil
+ }
+
+ type bitfieldFixupDef struct {
+ id TypeID
+ m *Member
+ }
+
+ var (
+ legacyBitfields = make(map[TypeID][2]Bits) // offset, size
+ bitfieldFixups []bitfieldFixupDef
+ )
+ convertMembers := func(raw []btfMember, kindFlag bool) ([]Member, error) {
+ // NB: The fixup below relies on pre-allocating this array to
+ // work, since otherwise append might re-allocate members.
+ members := make([]Member, 0, len(raw))
+ for i, btfMember := range raw {
+ name, err := rawStrings.Lookup(btfMember.NameOff)
+ if err != nil {
+ return nil, fmt.Errorf("can't get name for member %d: %w", i, err)
+ }
+
+ members = append(members, Member{
+ Name: name,
+ Offset: Bits(btfMember.Offset),
+ })
+
+ m := &members[i]
+ fixup(raw[i].Type, &m.Type)
+
+ if kindFlag {
+ m.BitfieldSize = Bits(btfMember.Offset >> 24)
+ m.Offset &= 0xffffff
+ // We ignore legacy bitfield definitions if the current composite
+ // is a new-style bitfield. This is kind of safe since offset and
+ // size on the type of the member must be zero if kindFlat is set
+ // according to spec.
+ continue
+ }
+
+ // This may be a legacy bitfield, try to fix it up.
+ data, ok := legacyBitfields[raw[i].Type]
+ if ok {
+ // Bingo!
+ m.Offset += data[0]
+ m.BitfieldSize = data[1]
+ continue
+ }
+
+ if m.Type != nil {
+ // We couldn't find a legacy bitfield, but we know that the member's
+ // type has already been inflated. Hence we know that it can't be
+ // a legacy bitfield and there is nothing left to do.
+ continue
+ }
+
+ // We don't have fixup data, and the type we're pointing
+ // at hasn't been inflated yet. No choice but to defer
+ // the fixup.
+ bitfieldFixups = append(bitfieldFixups, bitfieldFixupDef{
+ raw[i].Type,
+ m,
+ })
+ }
+ return members, nil
+ }
+
+ var declTags []*declTag
+ for _, raw := range rawTypes {
+ var (
+ id = firstTypeID + TypeID(len(types))
+ typ Type
+ )
+
+ if id < firstTypeID {
+ return nil, fmt.Errorf("no more type IDs")
+ }
+
+ name, err := rawStrings.Lookup(raw.NameOff)
+ if err != nil {
+ return nil, fmt.Errorf("get name for type id %d: %w", id, err)
+ }
+
+ switch raw.Kind() {
+ case kindInt:
+ size := raw.Size()
+ bi := raw.data.(*btfInt)
+ if bi.Offset() > 0 || bi.Bits().Bytes() != size {
+ legacyBitfields[id] = [2]Bits{bi.Offset(), bi.Bits()}
+ }
+ typ = &Int{name, raw.Size(), bi.Encoding()}
+
+ case kindPointer:
+ ptr := &Pointer{nil}
+ fixup(raw.Type(), &ptr.Target)
+ typ = ptr
+
+ case kindArray:
+ btfArr := raw.data.(*btfArray)
+ arr := &Array{nil, nil, btfArr.Nelems}
+ fixup(btfArr.IndexType, &arr.Index)
+ fixup(btfArr.Type, &arr.Type)
+ typ = arr
+
+ case kindStruct:
+ members, err := convertMembers(raw.data.([]btfMember), raw.Bitfield())
+ if err != nil {
+ return nil, fmt.Errorf("struct %s (id %d): %w", name, id, err)
+ }
+ typ = &Struct{name, raw.Size(), members}
+
+ case kindUnion:
+ members, err := convertMembers(raw.data.([]btfMember), raw.Bitfield())
+ if err != nil {
+ return nil, fmt.Errorf("union %s (id %d): %w", name, id, err)
+ }
+ typ = &Union{name, raw.Size(), members}
+
+ case kindEnum:
+ rawvals := raw.data.([]btfEnum)
+ vals := make([]EnumValue, 0, len(rawvals))
+ signed := raw.Signed()
+ for i, btfVal := range rawvals {
+ name, err := rawStrings.Lookup(btfVal.NameOff)
+ if err != nil {
+ return nil, fmt.Errorf("get name for enum value %d: %s", i, err)
+ }
+ value := uint64(btfVal.Val)
+ if signed {
+ // Sign extend values to 64 bit.
+ value = uint64(int32(btfVal.Val))
+ }
+ vals = append(vals, EnumValue{name, value})
+ }
+ typ = &Enum{name, raw.Size(), signed, vals}
+
+ case kindForward:
+ typ = &Fwd{name, raw.FwdKind()}
+
+ case kindTypedef:
+ typedef := &Typedef{name, nil}
+ fixup(raw.Type(), &typedef.Type)
+ typ = typedef
+
+ case kindVolatile:
+ volatile := &Volatile{nil}
+ fixup(raw.Type(), &volatile.Type)
+ typ = volatile
+
+ case kindConst:
+ cnst := &Const{nil}
+ fixup(raw.Type(), &cnst.Type)
+ typ = cnst
+
+ case kindRestrict:
+ restrict := &Restrict{nil}
+ fixup(raw.Type(), &restrict.Type)
+ typ = restrict
+
+ case kindFunc:
+ fn := &Func{name, nil, raw.Linkage()}
+ if err := fixupAndAssert(raw.Type(), &fn.Type, reflect.TypeOf((*FuncProto)(nil))); err != nil {
+ return nil, err
+ }
+ typ = fn
+
+ case kindFuncProto:
+ rawparams := raw.data.([]btfParam)
+ params := make([]FuncParam, 0, len(rawparams))
+ for i, param := range rawparams {
+ name, err := rawStrings.Lookup(param.NameOff)
+ if err != nil {
+ return nil, fmt.Errorf("get name for func proto parameter %d: %s", i, err)
+ }
+ params = append(params, FuncParam{
+ Name: name,
+ })
+ }
+ for i := range params {
+ fixup(rawparams[i].Type, ¶ms[i].Type)
+ }
+
+ fp := &FuncProto{nil, params}
+ fixup(raw.Type(), &fp.Return)
+ typ = fp
+
+ case kindVar:
+ variable := raw.data.(*btfVariable)
+ v := &Var{name, nil, VarLinkage(variable.Linkage)}
+ fixup(raw.Type(), &v.Type)
+ typ = v
+
+ case kindDatasec:
+ btfVars := raw.data.([]btfVarSecinfo)
+ vars := make([]VarSecinfo, 0, len(btfVars))
+ for _, btfVar := range btfVars {
+ vars = append(vars, VarSecinfo{
+ Offset: btfVar.Offset,
+ Size: btfVar.Size,
+ })
+ }
+ for i := range vars {
+ fixup(btfVars[i].Type, &vars[i].Type)
+ }
+ typ = &Datasec{name, raw.Size(), vars}
+
+ case kindFloat:
+ typ = &Float{name, raw.Size()}
+
+ case kindDeclTag:
+ btfIndex := raw.data.(*btfDeclTag).ComponentIdx
+ if uint64(btfIndex) > math.MaxInt {
+ return nil, fmt.Errorf("type id %d: index exceeds int", id)
+ }
+
+ dt := &declTag{nil, name, int(int32(btfIndex))}
+ fixup(raw.Type(), &dt.Type)
+ typ = dt
+
+ declTags = append(declTags, dt)
+
+ case kindTypeTag:
+ tt := &typeTag{nil, name}
+ fixup(raw.Type(), &tt.Type)
+ typ = tt
+
+ case kindEnum64:
+ rawvals := raw.data.([]btfEnum64)
+ vals := make([]EnumValue, 0, len(rawvals))
+ for i, btfVal := range rawvals {
+ name, err := rawStrings.Lookup(btfVal.NameOff)
+ if err != nil {
+ return nil, fmt.Errorf("get name for enum64 value %d: %s", i, err)
+ }
+ value := (uint64(btfVal.ValHi32) << 32) | uint64(btfVal.ValLo32)
+ vals = append(vals, EnumValue{name, value})
+ }
+ typ = &Enum{name, raw.Size(), raw.Signed(), vals}
+
+ default:
+ return nil, fmt.Errorf("type id %d: unknown kind: %v", id, raw.Kind())
+ }
+
+ types = append(types, typ)
+ }
+
+ for _, fixup := range fixups {
+ if fixup.id < firstTypeID {
+ return nil, fmt.Errorf("fixup for base type id %d is not expected", fixup.id)
+ }
+
+ idx := int(fixup.id - firstTypeID)
+ if idx >= len(types) {
+ return nil, fmt.Errorf("reference to invalid type id: %d", fixup.id)
+ }
+
+ *fixup.typ = types[idx]
+ }
+
+ for _, bitfieldFixup := range bitfieldFixups {
+ if bitfieldFixup.id < firstTypeID {
+ return nil, fmt.Errorf("bitfield fixup from split to base types is not expected")
+ }
+
+ data, ok := legacyBitfields[bitfieldFixup.id]
+ if ok {
+ // This is indeed a legacy bitfield, fix it up.
+ bitfieldFixup.m.Offset += data[0]
+ bitfieldFixup.m.BitfieldSize = data[1]
+ }
+ }
+
+ for _, assertion := range assertions {
+ if reflect.TypeOf(*assertion.typ) != assertion.want {
+ return nil, fmt.Errorf("type ID %d: expected %s, got %T", assertion.id, assertion.want, *assertion.typ)
+ }
+ }
+
+ for _, dt := range declTags {
+ switch t := dt.Type.(type) {
+ case *Var, *Typedef:
+ if dt.Index != -1 {
+ return nil, fmt.Errorf("type %s: index %d is not -1", dt, dt.Index)
+ }
+
+ case composite:
+ if dt.Index >= len(t.members()) {
+ return nil, fmt.Errorf("type %s: index %d exceeds members of %s", dt, dt.Index, t)
+ }
+
+ case *Func:
+ if dt.Index >= len(t.Type.(*FuncProto).Params) {
+ return nil, fmt.Errorf("type %s: index %d exceeds params of %s", dt, dt.Index, t)
+ }
+
+ default:
+ return nil, fmt.Errorf("type %s: decl tag for type %s is not supported", dt, t)
+ }
+ }
+
+ return types, nil
+}
+
+// essentialName represents the name of a BTF type stripped of any flavor
+// suffixes after a ___ delimiter.
+type essentialName string
+
+// newEssentialName returns name without a ___ suffix.
+//
+// CO-RE has the concept of 'struct flavors', which are used to deal with
+// changes in kernel data structures. Anything after three underscores
+// in a type name is ignored for the purpose of finding a candidate type
+// in the kernel's BTF.
+func newEssentialName(name string) essentialName {
+ if name == "" {
+ return ""
+ }
+ lastIdx := strings.LastIndex(name, "___")
+ if lastIdx > 0 {
+ return essentialName(name[:lastIdx])
+ }
+ return essentialName(name)
+}
+
+// UnderlyingType skips qualifiers and Typedefs.
+func UnderlyingType(typ Type) Type {
+ result := typ
+ for depth := 0; depth <= maxTypeDepth; depth++ {
+ switch v := (result).(type) {
+ case qualifier:
+ result = v.qualify()
+ case *Typedef:
+ result = v.Type
+ default:
+ return result
+ }
+ }
+ return &cycle{typ}
+}
+
+// as returns typ if is of type T. Otherwise it peels qualifiers and Typedefs
+// until it finds a T.
+//
+// Returns the zero value and false if there is no T or if the type is nested
+// too deeply.
+func as[T Type](typ Type) (T, bool) {
+ for depth := 0; depth <= maxTypeDepth; depth++ {
+ switch v := (typ).(type) {
+ case T:
+ return v, true
+ case qualifier:
+ typ = v.qualify()
+ case *Typedef:
+ typ = v.Type
+ default:
+ goto notFound
+ }
+ }
+notFound:
+ var zero T
+ return zero, false
+}
+
+type formatState struct {
+ fmt.State
+ depth int
+}
+
+// formattableType is a subset of Type, to ease unit testing of formatType.
+type formattableType interface {
+ fmt.Formatter
+ TypeName() string
+}
+
+// formatType formats a type in a canonical form.
+//
+// Handles cyclical types by only printing cycles up to a certain depth. Elements
+// in extra are separated by spaces unless the preceding element is a string
+// ending in '='.
+func formatType(f fmt.State, verb rune, t formattableType, extra ...interface{}) {
+ if verb != 'v' && verb != 's' {
+ fmt.Fprintf(f, "{UNRECOGNIZED: %c}", verb)
+ return
+ }
+
+ _, _ = io.WriteString(f, internal.GoTypeName(t))
+
+ if name := t.TypeName(); name != "" {
+ // Output BTF type name if present.
+ fmt.Fprintf(f, ":%q", name)
+ }
+
+ if f.Flag('+') {
+ // Output address if requested.
+ fmt.Fprintf(f, ":%#p", t)
+ }
+
+ if verb == 's' {
+ // %s omits details.
+ return
+ }
+
+ var depth int
+ if ps, ok := f.(*formatState); ok {
+ depth = ps.depth
+ f = ps.State
+ }
+
+ maxDepth, ok := f.Width()
+ if !ok {
+ maxDepth = 0
+ }
+
+ if depth > maxDepth {
+ // We've reached the maximum depth. This avoids infinite recursion even
+ // for cyclical types.
+ return
+ }
+
+ if len(extra) == 0 {
+ return
+ }
+
+ wantSpace := false
+ _, _ = io.WriteString(f, "[")
+ for _, arg := range extra {
+ if wantSpace {
+ _, _ = io.WriteString(f, " ")
+ }
+
+ switch v := arg.(type) {
+ case string:
+ _, _ = io.WriteString(f, v)
+ wantSpace = len(v) > 0 && v[len(v)-1] != '='
+ continue
+
+ case formattableType:
+ v.Format(&formatState{f, depth + 1}, verb)
+
+ default:
+ fmt.Fprint(f, arg)
+ }
+
+ wantSpace = true
+ }
+ _, _ = io.WriteString(f, "]")
+}
diff --git a/vendor/github.com/cilium/ebpf/btf/workarounds.go b/vendor/github.com/cilium/ebpf/btf/workarounds.go
new file mode 100644
index 000000000..12a89b87e
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/btf/workarounds.go
@@ -0,0 +1,26 @@
+package btf
+
+// datasecResolveWorkaround ensures that certain vars in a Datasec are added
+// to a Spec before the Datasec. This avoids a bug in kernel BTF validation.
+//
+// See https://lore.kernel.org/bpf/20230302123440.1193507-1-lmb@isovalent.com/
+func datasecResolveWorkaround(b *Builder, ds *Datasec) error {
+ for _, vsi := range ds.Vars {
+ v, ok := vsi.Type.(*Var)
+ if !ok {
+ continue
+ }
+
+ switch v.Type.(type) {
+ case *Typedef, *Volatile, *Const, *Restrict, *typeTag:
+ // NB: We must never call Add on a Datasec, otherwise we risk
+ // infinite recursion.
+ _, err := b.Add(v.Type)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/ebpf/collection.go b/vendor/github.com/cilium/ebpf/collection.go
new file mode 100644
index 000000000..a581ecf44
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/collection.go
@@ -0,0 +1,857 @@
+package ebpf
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/btf"
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/kconfig"
+ "github.com/cilium/ebpf/internal/sysenc"
+)
+
+// CollectionOptions control loading a collection into the kernel.
+//
+// Maps and Programs are passed to NewMapWithOptions and NewProgramsWithOptions.
+type CollectionOptions struct {
+ Maps MapOptions
+ Programs ProgramOptions
+
+ // MapReplacements takes a set of Maps that will be used instead of
+ // creating new ones when loading the CollectionSpec.
+ //
+ // For each given Map, there must be a corresponding MapSpec in
+ // CollectionSpec.Maps, and its type, key/value size, max entries and flags
+ // must match the values of the MapSpec.
+ //
+ // The given Maps are Clone()d before being used in the Collection, so the
+ // caller can Close() them freely when they are no longer needed.
+ MapReplacements map[string]*Map
+}
+
+// CollectionSpec describes a collection.
+type CollectionSpec struct {
+ Maps map[string]*MapSpec
+ Programs map[string]*ProgramSpec
+
+ // Types holds type information about Maps and Programs.
+ // Modifications to Types are currently undefined behaviour.
+ Types *btf.Spec
+
+ // ByteOrder specifies whether the ELF was compiled for
+ // big-endian or little-endian architectures.
+ ByteOrder binary.ByteOrder
+}
+
+// Copy returns a recursive copy of the spec.
+func (cs *CollectionSpec) Copy() *CollectionSpec {
+ if cs == nil {
+ return nil
+ }
+
+ cpy := CollectionSpec{
+ Maps: make(map[string]*MapSpec, len(cs.Maps)),
+ Programs: make(map[string]*ProgramSpec, len(cs.Programs)),
+ ByteOrder: cs.ByteOrder,
+ Types: cs.Types,
+ }
+
+ for name, spec := range cs.Maps {
+ cpy.Maps[name] = spec.Copy()
+ }
+
+ for name, spec := range cs.Programs {
+ cpy.Programs[name] = spec.Copy()
+ }
+
+ return &cpy
+}
+
+// RewriteMaps replaces all references to specific maps.
+//
+// Use this function to use pre-existing maps instead of creating new ones
+// when calling NewCollection. Any named maps are removed from CollectionSpec.Maps.
+//
+// Returns an error if a named map isn't used in at least one program.
+//
+// Deprecated: Pass CollectionOptions.MapReplacements when loading the Collection
+// instead.
+func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error {
+ for symbol, m := range maps {
+ // have we seen a program that uses this symbol / map
+ seen := false
+ for progName, progSpec := range cs.Programs {
+ err := progSpec.Instructions.AssociateMap(symbol, m)
+
+ switch {
+ case err == nil:
+ seen = true
+
+ case errors.Is(err, asm.ErrUnreferencedSymbol):
+ // Not all programs need to use the map
+
+ default:
+ return fmt.Errorf("program %s: %w", progName, err)
+ }
+ }
+
+ if !seen {
+ return fmt.Errorf("map %s not referenced by any programs", symbol)
+ }
+
+ // Prevent NewCollection from creating rewritten maps
+ delete(cs.Maps, symbol)
+ }
+
+ return nil
+}
+
+// MissingConstantsError is returned by [CollectionSpec.RewriteConstants].
+type MissingConstantsError struct {
+ // The constants missing from .rodata.
+ Constants []string
+}
+
+func (m *MissingConstantsError) Error() string {
+ return fmt.Sprintf("some constants are missing from .rodata: %s", strings.Join(m.Constants, ", "))
+}
+
+// RewriteConstants replaces the value of multiple constants.
+//
+// The constant must be defined like so in the C program:
+//
+// volatile const type foobar;
+// volatile const type foobar = default;
+//
+// Replacement values must be of the same length as the C sizeof(type).
+// If necessary, they are marshalled according to the same rules as
+// map values.
+//
+// From Linux 5.5 the verifier will use constants to eliminate dead code.
+//
+// Returns an error wrapping [MissingConstantsError] if a constant doesn't exist.
+func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error {
+ replaced := make(map[string]bool)
+
+ for name, spec := range cs.Maps {
+ if !strings.HasPrefix(name, ".rodata") {
+ continue
+ }
+
+ b, ds, err := spec.dataSection()
+ if errors.Is(err, errMapNoBTFValue) {
+ // Data sections without a BTF Datasec are valid, but don't support
+ // constant replacements.
+ continue
+ }
+ if err != nil {
+ return fmt.Errorf("map %s: %w", name, err)
+ }
+
+ // MapSpec.Copy() performs a shallow copy. Fully copy the byte slice
+ // to avoid any changes affecting other copies of the MapSpec.
+ cpy := make([]byte, len(b))
+ copy(cpy, b)
+
+ for _, v := range ds.Vars {
+ vname := v.Type.TypeName()
+ replacement, ok := consts[vname]
+ if !ok {
+ continue
+ }
+
+ if _, ok := v.Type.(*btf.Var); !ok {
+ return fmt.Errorf("section %s: unexpected type %T for variable %s", name, v.Type, vname)
+ }
+
+ if replaced[vname] {
+ return fmt.Errorf("section %s: duplicate variable %s", name, vname)
+ }
+
+ if int(v.Offset+v.Size) > len(cpy) {
+ return fmt.Errorf("section %s: offset %d(+%d) for variable %s is out of bounds", name, v.Offset, v.Size, vname)
+ }
+
+ b, err := sysenc.Marshal(replacement, int(v.Size))
+ if err != nil {
+ return fmt.Errorf("marshaling constant replacement %s: %w", vname, err)
+ }
+
+ b.CopyTo(cpy[v.Offset : v.Offset+v.Size])
+
+ replaced[vname] = true
+ }
+
+ spec.Contents[0] = MapKV{Key: uint32(0), Value: cpy}
+ }
+
+ var missing []string
+ for c := range consts {
+ if !replaced[c] {
+ missing = append(missing, c)
+ }
+ }
+
+ if len(missing) != 0 {
+ return fmt.Errorf("rewrite constants: %w", &MissingConstantsError{Constants: missing})
+ }
+
+ return nil
+}
+
+// Assign the contents of a CollectionSpec to a struct.
+//
+// This function is a shortcut to manually checking the presence
+// of maps and programs in a CollectionSpec. Consider using bpf2go
+// if this sounds useful.
+//
+// 'to' must be a pointer to a struct. A field of the
+// struct is updated with values from Programs or Maps if it
+// has an `ebpf` tag and its type is *ProgramSpec or *MapSpec.
+// The tag's value specifies the name of the program or map as
+// found in the CollectionSpec.
+//
+// struct {
+// Foo *ebpf.ProgramSpec `ebpf:"xdp_foo"`
+// Bar *ebpf.MapSpec `ebpf:"bar_map"`
+// Ignored int
+// }
+//
+// Returns an error if any of the eBPF objects can't be found, or
+// if the same MapSpec or ProgramSpec is assigned multiple times.
+func (cs *CollectionSpec) Assign(to interface{}) error {
+ // Assign() only supports assigning ProgramSpecs and MapSpecs,
+ // so doesn't load any resources into the kernel.
+ getValue := func(typ reflect.Type, name string) (interface{}, error) {
+ switch typ {
+
+ case reflect.TypeOf((*ProgramSpec)(nil)):
+ if p := cs.Programs[name]; p != nil {
+ return p, nil
+ }
+ return nil, fmt.Errorf("missing program %q", name)
+
+ case reflect.TypeOf((*MapSpec)(nil)):
+ if m := cs.Maps[name]; m != nil {
+ return m, nil
+ }
+ return nil, fmt.Errorf("missing map %q", name)
+
+ default:
+ return nil, fmt.Errorf("unsupported type %s", typ)
+ }
+ }
+
+ return assignValues(to, getValue)
+}
+
+// LoadAndAssign loads Maps and Programs into the kernel and assigns them
+// to a struct.
+//
+// Omitting Map/Program.Close() during application shutdown is an error.
+// See the package documentation for details around Map and Program lifecycle.
+//
+// This function is a shortcut to manually checking the presence
+// of maps and programs in a CollectionSpec. Consider using bpf2go
+// if this sounds useful.
+//
+// 'to' must be a pointer to a struct. A field of the struct is updated with
+// a Program or Map if it has an `ebpf` tag and its type is *Program or *Map.
+// The tag's value specifies the name of the program or map as found in the
+// CollectionSpec. Before updating the struct, the requested objects and their
+// dependent resources are loaded into the kernel and populated with values if
+// specified.
+//
+// struct {
+// Foo *ebpf.Program `ebpf:"xdp_foo"`
+// Bar *ebpf.Map `ebpf:"bar_map"`
+// Ignored int
+// }
+//
+// opts may be nil.
+//
+// Returns an error if any of the fields can't be found, or
+// if the same Map or Program is assigned multiple times.
+func (cs *CollectionSpec) LoadAndAssign(to interface{}, opts *CollectionOptions) error {
+ loader, err := newCollectionLoader(cs, opts)
+ if err != nil {
+ return err
+ }
+ defer loader.close()
+
+ // Support assigning Programs and Maps, lazy-loading the required objects.
+ assignedMaps := make(map[string]bool)
+ assignedProgs := make(map[string]bool)
+
+ getValue := func(typ reflect.Type, name string) (interface{}, error) {
+ switch typ {
+
+ case reflect.TypeOf((*Program)(nil)):
+ assignedProgs[name] = true
+ return loader.loadProgram(name)
+
+ case reflect.TypeOf((*Map)(nil)):
+ assignedMaps[name] = true
+ return loader.loadMap(name)
+
+ default:
+ return nil, fmt.Errorf("unsupported type %s", typ)
+ }
+ }
+
+ // Load the Maps and Programs requested by the annotated struct.
+ if err := assignValues(to, getValue); err != nil {
+ return err
+ }
+
+ // Populate the requested maps. Has a chance of lazy-loading other dependent maps.
+ if err := loader.populateDeferredMaps(); err != nil {
+ return err
+ }
+
+ // Evaluate the loader's objects after all (lazy)loading has taken place.
+ for n, m := range loader.maps {
+ switch m.typ {
+ case ProgramArray:
+ // Require all lazy-loaded ProgramArrays to be assigned to the given object.
+ // The kernel empties a ProgramArray once the last user space reference
+ // to it closes, which leads to failed tail calls. Combined with the library
+ // closing map fds via GC finalizers this can lead to surprising behaviour.
+ // Only allow unassigned ProgramArrays when the library hasn't pre-populated
+ // any entries from static value declarations. At this point, we know the map
+ // is empty and there's no way for the caller to interact with the map going
+ // forward.
+ if !assignedMaps[n] && len(cs.Maps[n].Contents) > 0 {
+ return fmt.Errorf("ProgramArray %s must be assigned to prevent missed tail calls", n)
+ }
+ }
+ }
+
+ // Prevent loader.cleanup() from closing assigned Maps and Programs.
+ for m := range assignedMaps {
+ delete(loader.maps, m)
+ }
+ for p := range assignedProgs {
+ delete(loader.programs, p)
+ }
+
+ return nil
+}
+
+// Collection is a collection of Programs and Maps associated
+// with their symbols
+type Collection struct {
+ Programs map[string]*Program
+ Maps map[string]*Map
+}
+
+// NewCollection creates a Collection from the given spec, creating and
+// loading its declared resources into the kernel.
+//
+// Omitting Collection.Close() during application shutdown is an error.
+// See the package documentation for details around Map and Program lifecycle.
+func NewCollection(spec *CollectionSpec) (*Collection, error) {
+ return NewCollectionWithOptions(spec, CollectionOptions{})
+}
+
+// NewCollectionWithOptions creates a Collection from the given spec using
+// options, creating and loading its declared resources into the kernel.
+//
+// Omitting Collection.Close() during application shutdown is an error.
+// See the package documentation for details around Map and Program lifecycle.
+func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (*Collection, error) {
+ loader, err := newCollectionLoader(spec, &opts)
+ if err != nil {
+ return nil, err
+ }
+ defer loader.close()
+
+ // Create maps first, as their fds need to be linked into programs.
+ for mapName := range spec.Maps {
+ if _, err := loader.loadMap(mapName); err != nil {
+ return nil, err
+ }
+ }
+
+ for progName, prog := range spec.Programs {
+ if prog.Type == UnspecifiedProgram {
+ continue
+ }
+
+ if _, err := loader.loadProgram(progName); err != nil {
+ return nil, err
+ }
+ }
+
+ // Maps can contain Program and Map stubs, so populate them after
+ // all Maps and Programs have been successfully loaded.
+ if err := loader.populateDeferredMaps(); err != nil {
+ return nil, err
+ }
+
+ // Prevent loader.cleanup from closing maps and programs.
+ maps, progs := loader.maps, loader.programs
+ loader.maps, loader.programs = nil, nil
+
+ return &Collection{
+ progs,
+ maps,
+ }, nil
+}
+
+type collectionLoader struct {
+ coll *CollectionSpec
+ opts *CollectionOptions
+ maps map[string]*Map
+ programs map[string]*Program
+}
+
+func newCollectionLoader(coll *CollectionSpec, opts *CollectionOptions) (*collectionLoader, error) {
+ if opts == nil {
+ opts = &CollectionOptions{}
+ }
+
+ // Check for existing MapSpecs in the CollectionSpec for all provided replacement maps.
+ for name, m := range opts.MapReplacements {
+ spec, ok := coll.Maps[name]
+ if !ok {
+ return nil, fmt.Errorf("replacement map %s not found in CollectionSpec", name)
+ }
+
+ if err := spec.Compatible(m); err != nil {
+ return nil, fmt.Errorf("using replacement map %s: %w", spec.Name, err)
+ }
+ }
+
+ return &collectionLoader{
+ coll,
+ opts,
+ make(map[string]*Map),
+ make(map[string]*Program),
+ }, nil
+}
+
+// close all resources left over in the collectionLoader.
+func (cl *collectionLoader) close() {
+ for _, m := range cl.maps {
+ m.Close()
+ }
+ for _, p := range cl.programs {
+ p.Close()
+ }
+}
+
+func (cl *collectionLoader) loadMap(mapName string) (*Map, error) {
+ if m := cl.maps[mapName]; m != nil {
+ return m, nil
+ }
+
+ mapSpec := cl.coll.Maps[mapName]
+ if mapSpec == nil {
+ return nil, fmt.Errorf("missing map %s", mapName)
+ }
+
+ if replaceMap, ok := cl.opts.MapReplacements[mapName]; ok {
+ // Clone the map to avoid closing user's map later on.
+ m, err := replaceMap.Clone()
+ if err != nil {
+ return nil, err
+ }
+
+ cl.maps[mapName] = m
+ return m, nil
+ }
+
+ m, err := newMapWithOptions(mapSpec, cl.opts.Maps)
+ if err != nil {
+ return nil, fmt.Errorf("map %s: %w", mapName, err)
+ }
+
+ // Finalize 'scalar' maps that don't refer to any other eBPF resources
+ // potentially pending creation. This is needed for frozen maps like .rodata
+ // that need to be finalized before invoking the verifier.
+ if !mapSpec.Type.canStoreMapOrProgram() {
+ if err := m.finalize(mapSpec); err != nil {
+ return nil, fmt.Errorf("finalizing map %s: %w", mapName, err)
+ }
+ }
+
+ cl.maps[mapName] = m
+ return m, nil
+}
+
+func (cl *collectionLoader) loadProgram(progName string) (*Program, error) {
+ if prog := cl.programs[progName]; prog != nil {
+ return prog, nil
+ }
+
+ progSpec := cl.coll.Programs[progName]
+ if progSpec == nil {
+ return nil, fmt.Errorf("unknown program %s", progName)
+ }
+
+ // Bail out early if we know the kernel is going to reject the program.
+ // This skips loading map dependencies, saving some cleanup work later.
+ if progSpec.Type == UnspecifiedProgram {
+ return nil, fmt.Errorf("cannot load program %s: program type is unspecified", progName)
+ }
+
+ progSpec = progSpec.Copy()
+
+ // Rewrite any reference to a valid map in the program's instructions,
+ // which includes all of its dependencies.
+ for i := range progSpec.Instructions {
+ ins := &progSpec.Instructions[i]
+
+ if !ins.IsLoadFromMap() || ins.Reference() == "" {
+ continue
+ }
+
+ // Don't overwrite map loads containing non-zero map fd's,
+ // they can be manually included by the caller.
+ // Map FDs/IDs are placed in the lower 32 bits of Constant.
+ if int32(ins.Constant) > 0 {
+ continue
+ }
+
+ m, err := cl.loadMap(ins.Reference())
+ if err != nil {
+ return nil, fmt.Errorf("program %s: %w", progName, err)
+ }
+
+ if err := ins.AssociateMap(m); err != nil {
+ return nil, fmt.Errorf("program %s: map %s: %w", progName, ins.Reference(), err)
+ }
+ }
+
+ prog, err := newProgramWithOptions(progSpec, cl.opts.Programs)
+ if err != nil {
+ return nil, fmt.Errorf("program %s: %w", progName, err)
+ }
+
+ cl.programs[progName] = prog
+ return prog, nil
+}
+
+// populateDeferredMaps iterates maps holding programs or other maps and loads
+// any dependencies. Populates all maps in cl and freezes them if specified.
+func (cl *collectionLoader) populateDeferredMaps() error {
+ for mapName, m := range cl.maps {
+ mapSpec, ok := cl.coll.Maps[mapName]
+ if !ok {
+ return fmt.Errorf("missing map spec %s", mapName)
+ }
+
+ // Scalar maps without Map or Program references are finalized during
+ // creation. Don't finalize them again.
+ if !mapSpec.Type.canStoreMapOrProgram() {
+ continue
+ }
+
+ mapSpec = mapSpec.Copy()
+
+ // MapSpecs that refer to inner maps or programs within the same
+ // CollectionSpec do so using strings. These strings are used as the key
+ // to look up the respective object in the Maps or Programs fields.
+ // Resolve those references to actual Map or Program resources that
+ // have been loaded into the kernel.
+ for i, kv := range mapSpec.Contents {
+ objName, ok := kv.Value.(string)
+ if !ok {
+ continue
+ }
+
+ switch t := mapSpec.Type; {
+ case t.canStoreProgram():
+ // loadProgram is idempotent and could return an existing Program.
+ prog, err := cl.loadProgram(objName)
+ if err != nil {
+ return fmt.Errorf("loading program %s, for map %s: %w", objName, mapName, err)
+ }
+ mapSpec.Contents[i] = MapKV{kv.Key, prog}
+
+ case t.canStoreMap():
+ // loadMap is idempotent and could return an existing Map.
+ innerMap, err := cl.loadMap(objName)
+ if err != nil {
+ return fmt.Errorf("loading inner map %s, for map %s: %w", objName, mapName, err)
+ }
+ mapSpec.Contents[i] = MapKV{kv.Key, innerMap}
+ }
+ }
+
+ // Populate and freeze the map if specified.
+ if err := m.finalize(mapSpec); err != nil {
+ return fmt.Errorf("populating map %s: %w", mapName, err)
+ }
+ }
+
+ return nil
+}
+
+// resolveKconfig resolves all variables declared in .kconfig and populates
+// m.Contents. Does nothing if the given m.Contents is non-empty.
+func resolveKconfig(m *MapSpec) error {
+ ds, ok := m.Value.(*btf.Datasec)
+ if !ok {
+ return errors.New("map value is not a Datasec")
+ }
+
+ type configInfo struct {
+ offset uint32
+ typ btf.Type
+ }
+
+ configs := make(map[string]configInfo)
+
+ data := make([]byte, ds.Size)
+ for _, vsi := range ds.Vars {
+ v := vsi.Type.(*btf.Var)
+ n := v.TypeName()
+
+ switch n {
+ case "LINUX_KERNEL_VERSION":
+ if integer, ok := v.Type.(*btf.Int); !ok || integer.Size != 4 {
+ return fmt.Errorf("variable %s must be a 32 bits integer, got %s", n, v.Type)
+ }
+
+ kv, err := internal.KernelVersion()
+ if err != nil {
+ return fmt.Errorf("getting kernel version: %w", err)
+ }
+ internal.NativeEndian.PutUint32(data[vsi.Offset:], kv.Kernel())
+
+ case "LINUX_HAS_SYSCALL_WRAPPER":
+ if integer, ok := v.Type.(*btf.Int); !ok || integer.Size != 4 {
+ return fmt.Errorf("variable %s must be a 32 bits integer, got %s", n, v.Type)
+ }
+ var value uint32 = 1
+ if err := haveSyscallWrapper(); errors.Is(err, ErrNotSupported) {
+ value = 0
+ } else if err != nil {
+ return fmt.Errorf("unable to derive a value for LINUX_HAS_SYSCALL_WRAPPER: %w", err)
+ }
+
+ internal.NativeEndian.PutUint32(data[vsi.Offset:], value)
+
+ default: // Catch CONFIG_*.
+ configs[n] = configInfo{
+ offset: vsi.Offset,
+ typ: v.Type,
+ }
+ }
+ }
+
+ // We only parse kconfig file if a CONFIG_* variable was found.
+ if len(configs) > 0 {
+ f, err := kconfig.Find()
+ if err != nil {
+ return fmt.Errorf("cannot find a kconfig file: %w", err)
+ }
+ defer f.Close()
+
+ filter := make(map[string]struct{}, len(configs))
+ for config := range configs {
+ filter[config] = struct{}{}
+ }
+
+ kernelConfig, err := kconfig.Parse(f, filter)
+ if err != nil {
+ return fmt.Errorf("cannot parse kconfig file: %w", err)
+ }
+
+ for n, info := range configs {
+ value, ok := kernelConfig[n]
+ if !ok {
+ return fmt.Errorf("config option %q does not exists for this kernel", n)
+ }
+
+ err := kconfig.PutValue(data[info.offset:], info.typ, value)
+ if err != nil {
+ return fmt.Errorf("problem adding value for %s: %w", n, err)
+ }
+ }
+ }
+
+ m.Contents = []MapKV{{uint32(0), data}}
+
+ return nil
+}
+
+// LoadCollection reads an object file and creates and loads its declared
+// resources into the kernel.
+//
+// Omitting Collection.Close() during application shutdown is an error.
+// See the package documentation for details around Map and Program lifecycle.
+func LoadCollection(file string) (*Collection, error) {
+ spec, err := LoadCollectionSpec(file)
+ if err != nil {
+ return nil, err
+ }
+ return NewCollection(spec)
+}
+
+// Close frees all maps and programs associated with the collection.
+//
+// The collection mustn't be used afterwards.
+func (coll *Collection) Close() {
+ for _, prog := range coll.Programs {
+ prog.Close()
+ }
+ for _, m := range coll.Maps {
+ m.Close()
+ }
+}
+
+// DetachMap removes the named map from the Collection.
+//
+// This means that a later call to Close() will not affect this map.
+//
+// Returns nil if no map of that name exists.
+func (coll *Collection) DetachMap(name string) *Map {
+ m := coll.Maps[name]
+ delete(coll.Maps, name)
+ return m
+}
+
+// DetachProgram removes the named program from the Collection.
+//
+// This means that a later call to Close() will not affect this program.
+//
+// Returns nil if no program of that name exists.
+func (coll *Collection) DetachProgram(name string) *Program {
+ p := coll.Programs[name]
+ delete(coll.Programs, name)
+ return p
+}
+
+// structField represents a struct field containing the ebpf struct tag.
+type structField struct {
+ reflect.StructField
+ value reflect.Value
+}
+
+// ebpfFields extracts field names tagged with 'ebpf' from a struct type.
+// Keep track of visited types to avoid infinite recursion.
+func ebpfFields(structVal reflect.Value, visited map[reflect.Type]bool) ([]structField, error) {
+ if visited == nil {
+ visited = make(map[reflect.Type]bool)
+ }
+
+ structType := structVal.Type()
+ if structType.Kind() != reflect.Struct {
+ return nil, fmt.Errorf("%s is not a struct", structType)
+ }
+
+ if visited[structType] {
+ return nil, fmt.Errorf("recursion on type %s", structType)
+ }
+
+ fields := make([]structField, 0, structType.NumField())
+ for i := 0; i < structType.NumField(); i++ {
+ field := structField{structType.Field(i), structVal.Field(i)}
+
+ // If the field is tagged, gather it and move on.
+ name := field.Tag.Get("ebpf")
+ if name != "" {
+ fields = append(fields, field)
+ continue
+ }
+
+ // If the field does not have an ebpf tag, but is a struct or a pointer
+ // to a struct, attempt to gather its fields as well.
+ var v reflect.Value
+ switch field.Type.Kind() {
+ case reflect.Ptr:
+ if field.Type.Elem().Kind() != reflect.Struct {
+ continue
+ }
+
+ if field.value.IsNil() {
+ return nil, fmt.Errorf("nil pointer to %s", structType)
+ }
+
+ // Obtain the destination type of the pointer.
+ v = field.value.Elem()
+
+ case reflect.Struct:
+ // Reference the value's type directly.
+ v = field.value
+
+ default:
+ continue
+ }
+
+ inner, err := ebpfFields(v, visited)
+ if err != nil {
+ return nil, fmt.Errorf("field %s: %w", field.Name, err)
+ }
+
+ fields = append(fields, inner...)
+ }
+
+ return fields, nil
+}
+
+// assignValues attempts to populate all fields of 'to' tagged with 'ebpf'.
+//
+// getValue is called for every tagged field of 'to' and must return the value
+// to be assigned to the field with the given typ and name.
+func assignValues(to interface{},
+ getValue func(typ reflect.Type, name string) (interface{}, error)) error {
+
+ toValue := reflect.ValueOf(to)
+ if toValue.Type().Kind() != reflect.Ptr {
+ return fmt.Errorf("%T is not a pointer to struct", to)
+ }
+
+ if toValue.IsNil() {
+ return fmt.Errorf("nil pointer to %T", to)
+ }
+
+ fields, err := ebpfFields(toValue.Elem(), nil)
+ if err != nil {
+ return err
+ }
+
+ type elem struct {
+ // Either *Map or *Program
+ typ reflect.Type
+ name string
+ }
+
+ assigned := make(map[elem]string)
+ for _, field := range fields {
+ // Get string value the field is tagged with.
+ tag := field.Tag.Get("ebpf")
+ if strings.Contains(tag, ",") {
+ return fmt.Errorf("field %s: ebpf tag contains a comma", field.Name)
+ }
+
+ // Check if the eBPF object with the requested
+ // type and tag was already assigned elsewhere.
+ e := elem{field.Type, tag}
+ if af := assigned[e]; af != "" {
+ return fmt.Errorf("field %s: object %q was already assigned to %s", field.Name, tag, af)
+ }
+
+ // Get the eBPF object referred to by the tag.
+ value, err := getValue(field.Type, tag)
+ if err != nil {
+ return fmt.Errorf("field %s: %w", field.Name, err)
+ }
+
+ if !field.value.CanSet() {
+ return fmt.Errorf("field %s: can't set value", field.Name)
+ }
+ field.value.Set(reflect.ValueOf(value))
+
+ assigned[e] = field.Name
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/ebpf/doc.go b/vendor/github.com/cilium/ebpf/doc.go
new file mode 100644
index 000000000..396b3394d
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/doc.go
@@ -0,0 +1,25 @@
+// Package ebpf is a toolkit for working with eBPF programs.
+//
+// eBPF programs are small snippets of code which are executed directly
+// in a VM in the Linux kernel, which makes them very fast and flexible.
+// Many Linux subsystems now accept eBPF programs. This makes it possible
+// to implement highly application specific logic inside the kernel,
+// without having to modify the actual kernel itself.
+//
+// This package is designed for long-running processes which
+// want to use eBPF to implement part of their application logic. It has no
+// run-time dependencies outside of the library and the Linux kernel itself.
+// eBPF code should be compiled ahead of time using clang, and shipped with
+// your application as any other resource.
+//
+// Use the link subpackage to attach a loaded program to a hook in the kernel.
+//
+// Note that losing all references to Map and Program resources will cause
+// their underlying file descriptors to be closed, potentially removing those
+// objects from the kernel. Always retain a reference by e.g. deferring a
+// Close() of a Collection or LoadAndAssign object until application exit.
+//
+// Special care needs to be taken when handling maps of type ProgramArray,
+// as the kernel erases its contents when the last userspace or bpffs
+// reference disappears, regardless of the map being in active use.
+package ebpf
diff --git a/vendor/github.com/cilium/ebpf/elf_reader.go b/vendor/github.com/cilium/ebpf/elf_reader.go
new file mode 100644
index 000000000..5e0bb98ea
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/elf_reader.go
@@ -0,0 +1,1309 @@
+package ebpf
+
+import (
+ "bufio"
+ "bytes"
+ "debug/elf"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "strings"
+
+ "github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/btf"
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+type kconfigMetaKey struct{}
+
+type kconfigMeta struct {
+ Map *MapSpec
+ Offset uint32
+}
+
+type kfuncMeta struct{}
+
+// elfCode is a convenience to reduce the amount of arguments that have to
+// be passed around explicitly. You should treat its contents as immutable.
+type elfCode struct {
+ *internal.SafeELFFile
+ sections map[elf.SectionIndex]*elfSection
+ license string
+ version uint32
+ btf *btf.Spec
+ extInfo *btf.ExtInfos
+ maps map[string]*MapSpec
+ kfuncs map[string]*btf.Func
+ kconfig *MapSpec
+}
+
+// LoadCollectionSpec parses an ELF file into a CollectionSpec.
+func LoadCollectionSpec(file string) (*CollectionSpec, error) {
+ f, err := os.Open(file)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ spec, err := LoadCollectionSpecFromReader(f)
+ if err != nil {
+ return nil, fmt.Errorf("file %s: %w", file, err)
+ }
+ return spec, nil
+}
+
+// LoadCollectionSpecFromReader parses an ELF file into a CollectionSpec.
+func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) {
+ f, err := internal.NewSafeELFFile(rd)
+ if err != nil {
+ return nil, err
+ }
+
+ // Checks if the ELF file is for BPF data.
+ // Old LLVM versions set e_machine to EM_NONE.
+ if f.File.Machine != unix.EM_NONE && f.File.Machine != elf.EM_BPF {
+ return nil, fmt.Errorf("unexpected machine type for BPF ELF: %s", f.File.Machine)
+ }
+
+ var (
+ licenseSection *elf.Section
+ versionSection *elf.Section
+ sections = make(map[elf.SectionIndex]*elfSection)
+ relSections = make(map[elf.SectionIndex]*elf.Section)
+ )
+
+ // This is the target of relocations generated by inline assembly.
+ sections[elf.SHN_UNDEF] = newElfSection(new(elf.Section), undefSection)
+
+ // Collect all the sections we're interested in. This includes relocations
+ // which we parse later.
+ //
+ // Keep the documentation at docs/ebpf/loading/elf-sections.md up-to-date.
+ for i, sec := range f.Sections {
+ idx := elf.SectionIndex(i)
+
+ switch {
+ case strings.HasPrefix(sec.Name, "license"):
+ licenseSection = sec
+ case strings.HasPrefix(sec.Name, "version"):
+ versionSection = sec
+ case strings.HasPrefix(sec.Name, "maps"):
+ sections[idx] = newElfSection(sec, mapSection)
+ case sec.Name == ".maps":
+ sections[idx] = newElfSection(sec, btfMapSection)
+ case sec.Name == ".bss" || sec.Name == ".data" || strings.HasPrefix(sec.Name, ".rodata"):
+ sections[idx] = newElfSection(sec, dataSection)
+ case sec.Type == elf.SHT_REL:
+ // Store relocations under the section index of the target
+ relSections[elf.SectionIndex(sec.Info)] = sec
+ case sec.Type == elf.SHT_PROGBITS && (sec.Flags&elf.SHF_EXECINSTR) != 0 && sec.Size > 0:
+ sections[idx] = newElfSection(sec, programSection)
+ }
+ }
+
+ license, err := loadLicense(licenseSection)
+ if err != nil {
+ return nil, fmt.Errorf("load license: %w", err)
+ }
+
+ version, err := loadVersion(versionSection, f.ByteOrder)
+ if err != nil {
+ return nil, fmt.Errorf("load version: %w", err)
+ }
+
+ btfSpec, btfExtInfo, err := btf.LoadSpecAndExtInfosFromReader(rd)
+ if err != nil && !errors.Is(err, btf.ErrNotFound) {
+ return nil, fmt.Errorf("load BTF: %w", err)
+ }
+
+ ec := &elfCode{
+ SafeELFFile: f,
+ sections: sections,
+ license: license,
+ version: version,
+ btf: btfSpec,
+ extInfo: btfExtInfo,
+ maps: make(map[string]*MapSpec),
+ kfuncs: make(map[string]*btf.Func),
+ }
+
+ symbols, err := f.Symbols()
+ if err != nil {
+ return nil, fmt.Errorf("load symbols: %v", err)
+ }
+
+ ec.assignSymbols(symbols)
+
+ if err := ec.loadRelocations(relSections, symbols); err != nil {
+ return nil, fmt.Errorf("load relocations: %w", err)
+ }
+
+ if err := ec.loadMaps(); err != nil {
+ return nil, fmt.Errorf("load maps: %w", err)
+ }
+
+ if err := ec.loadBTFMaps(); err != nil {
+ return nil, fmt.Errorf("load BTF maps: %w", err)
+ }
+
+ if err := ec.loadDataSections(); err != nil {
+ return nil, fmt.Errorf("load data sections: %w", err)
+ }
+
+ if err := ec.loadKconfigSection(); err != nil {
+ return nil, fmt.Errorf("load virtual .kconfig section: %w", err)
+ }
+
+ if err := ec.loadKsymsSection(); err != nil {
+ return nil, fmt.Errorf("load virtual .ksyms section: %w", err)
+ }
+
+ // Finally, collect programs and link them.
+ progs, err := ec.loadProgramSections()
+ if err != nil {
+ return nil, fmt.Errorf("load programs: %w", err)
+ }
+
+ return &CollectionSpec{ec.maps, progs, btfSpec, ec.ByteOrder}, nil
+}
+
+func loadLicense(sec *elf.Section) (string, error) {
+ if sec == nil {
+ return "", nil
+ }
+
+ data, err := sec.Data()
+ if err != nil {
+ return "", fmt.Errorf("section %s: %v", sec.Name, err)
+ }
+ return string(bytes.TrimRight(data, "\000")), nil
+}
+
+func loadVersion(sec *elf.Section, bo binary.ByteOrder) (uint32, error) {
+ if sec == nil {
+ return 0, nil
+ }
+
+ var version uint32
+ if err := binary.Read(sec.Open(), bo, &version); err != nil {
+ return 0, fmt.Errorf("section %s: %v", sec.Name, err)
+ }
+ return version, nil
+}
+
+type elfSectionKind int
+
+const (
+ undefSection elfSectionKind = iota
+ mapSection
+ btfMapSection
+ programSection
+ dataSection
+)
+
+type elfSection struct {
+ *elf.Section
+ kind elfSectionKind
+ // Offset from the start of the section to a symbol
+ symbols map[uint64]elf.Symbol
+ // Offset from the start of the section to a relocation, which points at
+ // a symbol in another section.
+ relocations map[uint64]elf.Symbol
+ // The number of relocations pointing at this section.
+ references int
+}
+
+func newElfSection(section *elf.Section, kind elfSectionKind) *elfSection {
+ return &elfSection{
+ section,
+ kind,
+ make(map[uint64]elf.Symbol),
+ make(map[uint64]elf.Symbol),
+ 0,
+ }
+}
+
+// assignSymbols takes a list of symbols and assigns them to their
+// respective sections, indexed by name.
+func (ec *elfCode) assignSymbols(symbols []elf.Symbol) {
+ for _, symbol := range symbols {
+ symType := elf.ST_TYPE(symbol.Info)
+ symSection := ec.sections[symbol.Section]
+ if symSection == nil {
+ continue
+ }
+
+ // Anonymous symbols only occur in debug sections which we don't process
+ // relocations for. Anonymous symbols are not referenced from other sections.
+ if symbol.Name == "" {
+ continue
+ }
+
+ // Older versions of LLVM don't tag symbols correctly, so keep
+ // all NOTYPE ones.
+ switch symSection.kind {
+ case mapSection, btfMapSection, dataSection:
+ if symType != elf.STT_NOTYPE && symType != elf.STT_OBJECT {
+ continue
+ }
+ case programSection:
+ if symType != elf.STT_NOTYPE && symType != elf.STT_FUNC {
+ continue
+ }
+ // LLVM emits LBB_ (Local Basic Block) symbols that seem to be jump
+ // targets within sections, but BPF has no use for them.
+ if symType == elf.STT_NOTYPE && elf.ST_BIND(symbol.Info) == elf.STB_LOCAL &&
+ strings.HasPrefix(symbol.Name, "LBB") {
+ continue
+ }
+ // Only collect symbols that occur in program/maps/data sections.
+ default:
+ continue
+ }
+
+ symSection.symbols[symbol.Value] = symbol
+ }
+}
+
+// loadRelocations iterates .rel* sections and extracts relocation entries for
+// sections of interest. Makes sure relocations point at valid sections.
+func (ec *elfCode) loadRelocations(relSections map[elf.SectionIndex]*elf.Section, symbols []elf.Symbol) error {
+ for idx, relSection := range relSections {
+ section := ec.sections[idx]
+ if section == nil {
+ continue
+ }
+
+ rels, err := ec.loadSectionRelocations(relSection, symbols)
+ if err != nil {
+ return fmt.Errorf("relocation for section %q: %w", section.Name, err)
+ }
+
+ for _, rel := range rels {
+ target := ec.sections[rel.Section]
+ if target == nil {
+ return fmt.Errorf("section %q: reference to %q in section %s: %w", section.Name, rel.Name, rel.Section, ErrNotSupported)
+ }
+
+ target.references++
+ }
+
+ section.relocations = rels
+ }
+
+ return nil
+}
+
+// loadProgramSections iterates ec's sections and emits a ProgramSpec
+// for each function it finds.
+//
+// The resulting map is indexed by function name.
+func (ec *elfCode) loadProgramSections() (map[string]*ProgramSpec, error) {
+
+ progs := make(map[string]*ProgramSpec)
+
+ // Generate a ProgramSpec for each function found in each program section.
+ var export []string
+ for _, sec := range ec.sections {
+ if sec.kind != programSection {
+ continue
+ }
+
+ if len(sec.symbols) == 0 {
+ return nil, fmt.Errorf("section %v: missing symbols", sec.Name)
+ }
+
+ funcs, err := ec.loadFunctions(sec)
+ if err != nil {
+ return nil, fmt.Errorf("section %v: %w", sec.Name, err)
+ }
+
+ progType, attachType, progFlags, attachTo := getProgType(sec.Name)
+
+ for name, insns := range funcs {
+ spec := &ProgramSpec{
+ Name: name,
+ Type: progType,
+ Flags: progFlags,
+ AttachType: attachType,
+ AttachTo: attachTo,
+ SectionName: sec.Name,
+ License: ec.license,
+ KernelVersion: ec.version,
+ Instructions: insns,
+ ByteOrder: ec.ByteOrder,
+ }
+
+ // Function names must be unique within a single ELF blob.
+ if progs[name] != nil {
+ return nil, fmt.Errorf("duplicate program name %s", name)
+ }
+ progs[name] = spec
+
+ if spec.SectionName != ".text" {
+ export = append(export, name)
+ }
+ }
+ }
+
+ flattenPrograms(progs, export)
+
+ // Hide programs (e.g. library functions) that were not explicitly emitted
+ // to an ELF section. These could be exposed in a separate CollectionSpec
+ // field later to allow them to be modified.
+ for n, p := range progs {
+ if p.SectionName == ".text" {
+ delete(progs, n)
+ }
+ }
+
+ return progs, nil
+}
+
+// loadFunctions extracts instruction streams from the given program section
+// starting at each symbol in the section. The section's symbols must already
+// be narrowed down to STT_NOTYPE (emitted by clang <8) or STT_FUNC.
+//
+// The resulting map is indexed by function name.
+func (ec *elfCode) loadFunctions(section *elfSection) (map[string]asm.Instructions, error) {
+ r := bufio.NewReader(section.Open())
+
+ // Decode the section's instruction stream.
+ var insns asm.Instructions
+ if err := insns.Unmarshal(r, ec.ByteOrder); err != nil {
+ return nil, fmt.Errorf("decoding instructions for section %s: %w", section.Name, err)
+ }
+ if len(insns) == 0 {
+ return nil, fmt.Errorf("no instructions found in section %s", section.Name)
+ }
+
+ iter := insns.Iterate()
+ for iter.Next() {
+ ins := iter.Ins
+ offset := iter.Offset.Bytes()
+
+ // Tag Symbol Instructions.
+ if sym, ok := section.symbols[offset]; ok {
+ *ins = ins.WithSymbol(sym.Name)
+ }
+
+ // Apply any relocations for the current instruction.
+ // If no relocation is present, resolve any section-relative function calls.
+ if rel, ok := section.relocations[offset]; ok {
+ if err := ec.relocateInstruction(ins, rel); err != nil {
+ return nil, fmt.Errorf("offset %d: relocating instruction: %w", offset, err)
+ }
+ } else {
+ if err := referenceRelativeJump(ins, offset, section.symbols); err != nil {
+ return nil, fmt.Errorf("offset %d: resolving relative jump: %w", offset, err)
+ }
+ }
+ }
+
+ if ec.extInfo != nil {
+ ec.extInfo.Assign(insns, section.Name)
+ }
+
+ return splitSymbols(insns)
+}
+
+// referenceRelativeJump turns a relative jump to another bpf subprogram within
+// the same ELF section into a Reference Instruction.
+//
+// Up to LLVM 9, calls to subprograms within the same ELF section are sometimes
+// encoded using relative jumps instead of relocation entries. These jumps go
+// out of bounds of the current program, so their targets must be memoized
+// before the section's instruction stream is split.
+//
+// The relative jump Constant is blinded to -1 and the target Symbol is set as
+// the Instruction's Reference so it can be resolved by the linker.
+func referenceRelativeJump(ins *asm.Instruction, offset uint64, symbols map[uint64]elf.Symbol) error {
+ if !ins.IsFunctionReference() || ins.Constant == -1 {
+ return nil
+ }
+
+ tgt := jumpTarget(offset, *ins)
+ sym := symbols[tgt].Name
+ if sym == "" {
+ return fmt.Errorf("no jump target found at offset %d", tgt)
+ }
+
+ *ins = ins.WithReference(sym)
+ ins.Constant = -1
+
+ return nil
+}
+
+// jumpTarget takes ins' offset within an instruction stream (in bytes)
+// and returns its absolute jump destination (in bytes) within the
+// instruction stream.
+func jumpTarget(offset uint64, ins asm.Instruction) uint64 {
+ // A relative jump instruction describes the amount of raw BPF instructions
+ // to jump, convert the offset into bytes.
+ dest := ins.Constant * asm.InstructionSize
+
+ // The starting point of the jump is the end of the current instruction.
+ dest += int64(offset + asm.InstructionSize)
+
+ if dest < 0 {
+ return 0
+ }
+
+ return uint64(dest)
+}
+
+func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) error {
+ var (
+ typ = elf.ST_TYPE(rel.Info)
+ bind = elf.ST_BIND(rel.Info)
+ name = rel.Name
+ )
+
+ target := ec.sections[rel.Section]
+
+ switch target.kind {
+ case mapSection, btfMapSection:
+ if bind != elf.STB_GLOBAL {
+ return fmt.Errorf("possible erroneous static qualifier on map definition: found reference to %q", name)
+ }
+
+ if typ != elf.STT_OBJECT && typ != elf.STT_NOTYPE {
+ // STT_NOTYPE is generated on clang < 8 which doesn't tag
+ // relocations appropriately.
+ return fmt.Errorf("map load: incorrect relocation type %v", typ)
+ }
+
+ ins.Src = asm.PseudoMapFD
+
+ case dataSection:
+ var offset uint32
+ switch typ {
+ case elf.STT_SECTION:
+ if bind != elf.STB_LOCAL {
+ return fmt.Errorf("direct load: %s: unsupported section relocation %s", name, bind)
+ }
+
+ // This is really a reference to a static symbol, which clang doesn't
+ // emit a symbol table entry for. Instead it encodes the offset in
+ // the instruction itself.
+ offset = uint32(uint64(ins.Constant))
+
+ case elf.STT_OBJECT:
+ // LLVM 9 emits OBJECT-LOCAL symbols for anonymous constants.
+ if bind != elf.STB_GLOBAL && bind != elf.STB_LOCAL {
+ return fmt.Errorf("direct load: %s: unsupported object relocation %s", name, bind)
+ }
+
+ offset = uint32(rel.Value)
+
+ case elf.STT_NOTYPE:
+ // LLVM 7 emits NOTYPE-LOCAL symbols for anonymous constants.
+ if bind != elf.STB_LOCAL {
+ return fmt.Errorf("direct load: %s: unsupported untyped relocation %s", name, bind)
+ }
+
+ offset = uint32(rel.Value)
+
+ default:
+ return fmt.Errorf("incorrect relocation type %v for direct map load", typ)
+ }
+
+ // We rely on using the name of the data section as the reference. It
+ // would be nicer to keep the real name in case of an STT_OBJECT, but
+ // it's not clear how to encode that into Instruction.
+ name = target.Name
+
+ // The kernel expects the offset in the second basic BPF instruction.
+ ins.Constant = int64(uint64(offset) << 32)
+ ins.Src = asm.PseudoMapValue
+
+ case programSection:
+ switch opCode := ins.OpCode; {
+ case opCode.JumpOp() == asm.Call:
+ if ins.Src != asm.PseudoCall {
+ return fmt.Errorf("call: %s: incorrect source register", name)
+ }
+
+ switch typ {
+ case elf.STT_NOTYPE, elf.STT_FUNC:
+ if bind != elf.STB_GLOBAL {
+ return fmt.Errorf("call: %s: unsupported binding: %s", name, bind)
+ }
+
+ case elf.STT_SECTION:
+ if bind != elf.STB_LOCAL {
+ return fmt.Errorf("call: %s: unsupported binding: %s", name, bind)
+ }
+
+ // The function we want to call is in the indicated section,
+ // at the offset encoded in the instruction itself. Reverse
+ // the calculation to find the real function we're looking for.
+ // A value of -1 references the first instruction in the section.
+ offset := int64(int32(ins.Constant)+1) * asm.InstructionSize
+ sym, ok := target.symbols[uint64(offset)]
+ if !ok {
+ return fmt.Errorf("call: no symbol at offset %d", offset)
+ }
+
+ name = sym.Name
+ ins.Constant = -1
+
+ default:
+ return fmt.Errorf("call: %s: invalid symbol type %s", name, typ)
+ }
+ case opCode.IsDWordLoad():
+ switch typ {
+ case elf.STT_FUNC:
+ if bind != elf.STB_GLOBAL {
+ return fmt.Errorf("load: %s: unsupported binding: %s", name, bind)
+ }
+
+ case elf.STT_SECTION:
+ if bind != elf.STB_LOCAL {
+ return fmt.Errorf("load: %s: unsupported binding: %s", name, bind)
+ }
+
+ // ins.Constant already contains the offset in bytes from the
+ // start of the section. This is different than a call to a
+ // static function.
+
+ default:
+ return fmt.Errorf("load: %s: invalid symbol type %s", name, typ)
+ }
+
+ sym, ok := target.symbols[uint64(ins.Constant)]
+ if !ok {
+ return fmt.Errorf("load: no symbol at offset %d", ins.Constant)
+ }
+
+ name = sym.Name
+ ins.Constant = -1
+ ins.Src = asm.PseudoFunc
+
+ default:
+ return fmt.Errorf("neither a call nor a load instruction: %v", ins)
+ }
+
+ // The Undefined section is used for 'virtual' symbols that aren't backed by
+ // an ELF section. This includes symbol references from inline asm, forward
+ // function declarations, as well as extern kfunc declarations using __ksym
+ // and extern kconfig variables declared using __kconfig.
+ case undefSection:
+ if bind != elf.STB_GLOBAL {
+ return fmt.Errorf("asm relocation: %s: unsupported binding: %s", name, bind)
+ }
+
+ if typ != elf.STT_NOTYPE {
+ return fmt.Errorf("asm relocation: %s: unsupported type %s", name, typ)
+ }
+
+ kf := ec.kfuncs[name]
+ switch {
+ // If a Call instruction is found and the datasec has a btf.Func with a Name
+ // that matches the symbol name we mark the instruction as a call to a kfunc.
+ case kf != nil && ins.OpCode.JumpOp() == asm.Call:
+ ins.Metadata.Set(kfuncMeta{}, kf)
+ ins.Src = asm.PseudoKfuncCall
+ ins.Constant = -1
+
+ // If no kconfig map is found, this must be a symbol reference from inline
+ // asm (see testdata/loader.c:asm_relocation()) or a call to a forward
+ // function declaration (see testdata/fwd_decl.c). Don't interfere, These
+ // remain standard symbol references.
+ // extern __kconfig reads are represented as dword loads that need to be
+ // rewritten to pseudo map loads from .kconfig. If the map is present,
+ // require it to contain the symbol to disambiguate between inline asm
+ // relos and kconfigs.
+ case ec.kconfig != nil && ins.OpCode.IsDWordLoad():
+ for _, vsi := range ec.kconfig.Value.(*btf.Datasec).Vars {
+ if vsi.Type.(*btf.Var).Name != rel.Name {
+ continue
+ }
+
+ ins.Src = asm.PseudoMapValue
+ ins.Metadata.Set(kconfigMetaKey{}, &kconfigMeta{ec.kconfig, vsi.Offset})
+ return nil
+ }
+
+ return fmt.Errorf("kconfig %s not found in .kconfig", rel.Name)
+ }
+
+ default:
+ return fmt.Errorf("relocation to %q: %w", target.Name, ErrNotSupported)
+ }
+
+ *ins = ins.WithReference(name)
+ return nil
+}
+
+func (ec *elfCode) loadMaps() error {
+ for _, sec := range ec.sections {
+ if sec.kind != mapSection {
+ continue
+ }
+
+ nSym := len(sec.symbols)
+ if nSym == 0 {
+ return fmt.Errorf("section %v: no symbols", sec.Name)
+ }
+
+ if sec.Size%uint64(nSym) != 0 {
+ return fmt.Errorf("section %v: map descriptors are not of equal size", sec.Name)
+ }
+
+ var (
+ r = bufio.NewReader(sec.Open())
+ size = sec.Size / uint64(nSym)
+ )
+ for i, offset := 0, uint64(0); i < nSym; i, offset = i+1, offset+size {
+ mapSym, ok := sec.symbols[offset]
+ if !ok {
+ return fmt.Errorf("section %s: missing symbol for map at offset %d", sec.Name, offset)
+ }
+
+ mapName := mapSym.Name
+ if ec.maps[mapName] != nil {
+ return fmt.Errorf("section %v: map %v already exists", sec.Name, mapSym)
+ }
+
+ lr := io.LimitReader(r, int64(size))
+
+ spec := MapSpec{
+ Name: SanitizeName(mapName, -1),
+ }
+ switch {
+ case binary.Read(lr, ec.ByteOrder, &spec.Type) != nil:
+ return fmt.Errorf("map %s: missing type", mapName)
+ case binary.Read(lr, ec.ByteOrder, &spec.KeySize) != nil:
+ return fmt.Errorf("map %s: missing key size", mapName)
+ case binary.Read(lr, ec.ByteOrder, &spec.ValueSize) != nil:
+ return fmt.Errorf("map %s: missing value size", mapName)
+ case binary.Read(lr, ec.ByteOrder, &spec.MaxEntries) != nil:
+ return fmt.Errorf("map %s: missing max entries", mapName)
+ case binary.Read(lr, ec.ByteOrder, &spec.Flags) != nil:
+ return fmt.Errorf("map %s: missing flags", mapName)
+ }
+
+ extra, err := io.ReadAll(lr)
+ if err != nil {
+ return fmt.Errorf("map %s: reading map tail: %w", mapName, err)
+ }
+ if len(extra) > 0 {
+ spec.Extra = bytes.NewReader(extra)
+ }
+
+ ec.maps[mapName] = &spec
+ }
+ }
+
+ return nil
+}
+
+// loadBTFMaps iterates over all ELF sections marked as BTF map sections
+// (like .maps) and parses them into MapSpecs. Dump the .maps section and
+// any relocations with `readelf -x .maps -r `.
+func (ec *elfCode) loadBTFMaps() error {
+ for _, sec := range ec.sections {
+ if sec.kind != btfMapSection {
+ continue
+ }
+
+ if ec.btf == nil {
+ return fmt.Errorf("missing BTF")
+ }
+
+ // Each section must appear as a DataSec in the ELF's BTF blob.
+ var ds *btf.Datasec
+ if err := ec.btf.TypeByName(sec.Name, &ds); err != nil {
+ return fmt.Errorf("cannot find section '%s' in BTF: %w", sec.Name, err)
+ }
+
+ // Open a Reader to the ELF's raw section bytes so we can assert that all
+ // of them are zero on a per-map (per-Var) basis. For now, the section's
+ // sole purpose is to receive relocations, so all must be zero.
+ rs := sec.Open()
+
+ for _, vs := range ds.Vars {
+ // BPF maps are declared as and assigned to global variables,
+ // so iterate over each Var in the DataSec and validate their types.
+ v, ok := vs.Type.(*btf.Var)
+ if !ok {
+ return fmt.Errorf("section %v: unexpected type %s", sec.Name, vs.Type)
+ }
+ name := string(v.Name)
+
+ // The BTF metadata for each Var contains the full length of the map
+ // declaration, so read the corresponding amount of bytes from the ELF.
+ // This way, we can pinpoint which map declaration contains unexpected
+ // (and therefore unsupported) data.
+ _, err := io.Copy(internal.DiscardZeroes{}, io.LimitReader(rs, int64(vs.Size)))
+ if err != nil {
+ return fmt.Errorf("section %v: map %s: initializing BTF map definitions: %w", sec.Name, name, internal.ErrNotSupported)
+ }
+
+ if ec.maps[name] != nil {
+ return fmt.Errorf("section %v: map %s already exists", sec.Name, name)
+ }
+
+ // Each Var representing a BTF map definition contains a Struct.
+ mapStruct, ok := btf.UnderlyingType(v.Type).(*btf.Struct)
+ if !ok {
+ return fmt.Errorf("expected struct, got %s", v.Type)
+ }
+
+ mapSpec, err := mapSpecFromBTF(sec, &vs, mapStruct, ec.btf, name, false)
+ if err != nil {
+ return fmt.Errorf("map %v: %w", name, err)
+ }
+
+ ec.maps[name] = mapSpec
+ }
+
+ // Drain the ELF section reader to make sure all bytes are accounted for
+ // with BTF metadata.
+ i, err := io.Copy(io.Discard, rs)
+ if err != nil {
+ return fmt.Errorf("section %v: unexpected error reading remainder of ELF section: %w", sec.Name, err)
+ }
+ if i > 0 {
+ return fmt.Errorf("section %v: %d unexpected remaining bytes in ELF section, invalid BTF?", sec.Name, i)
+ }
+ }
+
+ return nil
+}
+
+// mapSpecFromBTF produces a MapSpec based on a btf.Struct def representing
+// a BTF map definition. The name and spec arguments will be copied to the
+// resulting MapSpec, and inner must be true on any recursive invocations.
+func mapSpecFromBTF(es *elfSection, vs *btf.VarSecinfo, def *btf.Struct, spec *btf.Spec, name string, inner bool) (*MapSpec, error) {
+ var (
+ key, value btf.Type
+ keySize, valueSize uint32
+ mapType MapType
+ flags, maxEntries uint32
+ pinType PinType
+ innerMapSpec *MapSpec
+ contents []MapKV
+ err error
+ )
+
+ for i, member := range def.Members {
+ switch member.Name {
+ case "type":
+ mt, err := uintFromBTF(member.Type)
+ if err != nil {
+ return nil, fmt.Errorf("can't get type: %w", err)
+ }
+ mapType = MapType(mt)
+
+ case "map_flags":
+ flags, err = uintFromBTF(member.Type)
+ if err != nil {
+ return nil, fmt.Errorf("can't get BTF map flags: %w", err)
+ }
+
+ case "max_entries":
+ maxEntries, err = uintFromBTF(member.Type)
+ if err != nil {
+ return nil, fmt.Errorf("can't get BTF map max entries: %w", err)
+ }
+
+ case "key":
+ if keySize != 0 {
+ return nil, errors.New("both key and key_size given")
+ }
+
+ pk, ok := member.Type.(*btf.Pointer)
+ if !ok {
+ return nil, fmt.Errorf("key type is not a pointer: %T", member.Type)
+ }
+
+ key = pk.Target
+
+ size, err := btf.Sizeof(pk.Target)
+ if err != nil {
+ return nil, fmt.Errorf("can't get size of BTF key: %w", err)
+ }
+
+ keySize = uint32(size)
+
+ case "value":
+ if valueSize != 0 {
+ return nil, errors.New("both value and value_size given")
+ }
+
+ vk, ok := member.Type.(*btf.Pointer)
+ if !ok {
+ return nil, fmt.Errorf("value type is not a pointer: %T", member.Type)
+ }
+
+ value = vk.Target
+
+ size, err := btf.Sizeof(vk.Target)
+ if err != nil {
+ return nil, fmt.Errorf("can't get size of BTF value: %w", err)
+ }
+
+ valueSize = uint32(size)
+
+ case "key_size":
+ // Key needs to be nil and keySize needs to be 0 for key_size to be
+ // considered a valid member.
+ if key != nil || keySize != 0 {
+ return nil, errors.New("both key and key_size given")
+ }
+
+ keySize, err = uintFromBTF(member.Type)
+ if err != nil {
+ return nil, fmt.Errorf("can't get BTF key size: %w", err)
+ }
+
+ case "value_size":
+ // Value needs to be nil and valueSize needs to be 0 for value_size to be
+ // considered a valid member.
+ if value != nil || valueSize != 0 {
+ return nil, errors.New("both value and value_size given")
+ }
+
+ valueSize, err = uintFromBTF(member.Type)
+ if err != nil {
+ return nil, fmt.Errorf("can't get BTF value size: %w", err)
+ }
+
+ case "pinning":
+ if inner {
+ return nil, errors.New("inner maps can't be pinned")
+ }
+
+ pinning, err := uintFromBTF(member.Type)
+ if err != nil {
+ return nil, fmt.Errorf("can't get pinning: %w", err)
+ }
+
+ pinType = PinType(pinning)
+
+ case "values":
+ // The 'values' field in BTF map definitions is used for declaring map
+ // value types that are references to other BPF objects, like other maps
+ // or programs. It is always expected to be an array of pointers.
+ if i != len(def.Members)-1 {
+ return nil, errors.New("'values' must be the last member in a BTF map definition")
+ }
+
+ if valueSize != 0 && valueSize != 4 {
+ return nil, errors.New("value_size must be 0 or 4")
+ }
+ valueSize = 4
+
+ valueType, err := resolveBTFArrayMacro(member.Type)
+ if err != nil {
+ return nil, fmt.Errorf("can't resolve type of member 'values': %w", err)
+ }
+
+ switch t := valueType.(type) {
+ case *btf.Struct:
+ // The values member pointing to an array of structs means we're expecting
+ // a map-in-map declaration.
+ if mapType != ArrayOfMaps && mapType != HashOfMaps {
+ return nil, errors.New("outer map needs to be an array or a hash of maps")
+ }
+ if inner {
+ return nil, fmt.Errorf("nested inner maps are not supported")
+ }
+
+ // This inner map spec is used as a map template, but it needs to be
+ // created as a traditional map before it can be used to do so.
+ // libbpf names the inner map template '.inner', but we
+ // opted for _inner to simplify validation logic. (dots only supported
+ // on kernels 5.2 and up)
+ // Pass the BTF spec from the parent object, since both parent and
+ // child must be created from the same BTF blob (on kernels that support BTF).
+ innerMapSpec, err = mapSpecFromBTF(es, vs, t, spec, name+"_inner", true)
+ if err != nil {
+ return nil, fmt.Errorf("can't parse BTF map definition of inner map: %w", err)
+ }
+
+ case *btf.FuncProto:
+ // The values member contains an array of function pointers, meaning an
+ // autopopulated PROG_ARRAY.
+ if mapType != ProgramArray {
+ return nil, errors.New("map needs to be a program array")
+ }
+
+ default:
+ return nil, fmt.Errorf("unsupported value type %q in 'values' field", t)
+ }
+
+ contents, err = resolveBTFValuesContents(es, vs, member)
+ if err != nil {
+ return nil, fmt.Errorf("resolving values contents: %w", err)
+ }
+
+ default:
+ return nil, fmt.Errorf("unrecognized field %s in BTF map definition", member.Name)
+ }
+ }
+
+ return &MapSpec{
+ Name: SanitizeName(name, -1),
+ Type: MapType(mapType),
+ KeySize: keySize,
+ ValueSize: valueSize,
+ MaxEntries: maxEntries,
+ Flags: flags,
+ Key: key,
+ Value: value,
+ Pinning: pinType,
+ InnerMap: innerMapSpec,
+ Contents: contents,
+ }, nil
+}
+
+// uintFromBTF resolves the __uint macro, which is a pointer to a sized
+// array, e.g. for int (*foo)[10], this function will return 10.
+func uintFromBTF(typ btf.Type) (uint32, error) {
+ ptr, ok := typ.(*btf.Pointer)
+ if !ok {
+ return 0, fmt.Errorf("not a pointer: %v", typ)
+ }
+
+ arr, ok := ptr.Target.(*btf.Array)
+ if !ok {
+ return 0, fmt.Errorf("not a pointer to array: %v", typ)
+ }
+
+ return arr.Nelems, nil
+}
+
+// resolveBTFArrayMacro resolves the __array macro, which declares an array
+// of pointers to a given type. This function returns the target Type of
+// the pointers in the array.
+func resolveBTFArrayMacro(typ btf.Type) (btf.Type, error) {
+ arr, ok := typ.(*btf.Array)
+ if !ok {
+ return nil, fmt.Errorf("not an array: %v", typ)
+ }
+
+ ptr, ok := arr.Type.(*btf.Pointer)
+ if !ok {
+ return nil, fmt.Errorf("not an array of pointers: %v", typ)
+ }
+
+ return ptr.Target, nil
+}
+
+// resolveBTFValuesContents resolves relocations into ELF sections belonging
+// to btf.VarSecinfo's. This can be used on the 'values' member in BTF map
+// definitions to extract static declarations of map contents.
+func resolveBTFValuesContents(es *elfSection, vs *btf.VarSecinfo, member btf.Member) ([]MapKV, error) {
+ // The elements of a .values pointer array are not encoded in BTF.
+ // Instead, relocations are generated into each array index.
+ // However, it's possible to leave certain array indices empty, so all
+ // indices' offsets need to be checked for emitted relocations.
+
+ // The offset of the 'values' member within the _struct_ (in bits)
+ // is the starting point of the array. Convert to bytes. Add VarSecinfo
+ // offset to get the absolute position in the ELF blob.
+ start := member.Offset.Bytes() + vs.Offset
+ // 'values' is encoded in BTF as a zero (variable) length struct
+ // member, and its contents run until the end of the VarSecinfo.
+ // Add VarSecinfo offset to get the absolute position in the ELF blob.
+ end := vs.Size + vs.Offset
+ // The size of an address in this section. This determines the width of
+ // an index in the array.
+ align := uint32(es.SectionHeader.Addralign)
+
+ // Check if variable-length section is aligned.
+ if (end-start)%align != 0 {
+ return nil, errors.New("unaligned static values section")
+ }
+ elems := (end - start) / align
+
+ if elems == 0 {
+ return nil, nil
+ }
+
+ contents := make([]MapKV, 0, elems)
+
+ // k is the array index, off is its corresponding ELF section offset.
+ for k, off := uint32(0), start; k < elems; k, off = k+1, off+align {
+ r, ok := es.relocations[uint64(off)]
+ if !ok {
+ continue
+ }
+
+ // Relocation exists for the current offset in the ELF section.
+ // Emit a value stub based on the type of relocation to be replaced by
+ // a real fd later in the pipeline before populating the map.
+ // Map keys are encoded in MapKV entries, so empty array indices are
+ // skipped here.
+ switch t := elf.ST_TYPE(r.Info); t {
+ case elf.STT_FUNC:
+ contents = append(contents, MapKV{uint32(k), r.Name})
+ case elf.STT_OBJECT:
+ contents = append(contents, MapKV{uint32(k), r.Name})
+ default:
+ return nil, fmt.Errorf("unknown relocation type %v for symbol %s", t, r.Name)
+ }
+ }
+
+ return contents, nil
+}
+
+func (ec *elfCode) loadDataSections() error {
+ for _, sec := range ec.sections {
+ if sec.kind != dataSection {
+ continue
+ }
+
+ if sec.references == 0 {
+ // Prune data sections which are not referenced by any
+ // instructions.
+ continue
+ }
+
+ mapSpec := &MapSpec{
+ Name: SanitizeName(sec.Name, -1),
+ Type: Array,
+ KeySize: 4,
+ ValueSize: uint32(sec.Size),
+ MaxEntries: 1,
+ }
+
+ switch sec.Type {
+ // Only open the section if we know there's actual data to be read.
+ case elf.SHT_PROGBITS:
+ data, err := sec.Data()
+ if err != nil {
+ return fmt.Errorf("data section %s: can't get contents: %w", sec.Name, err)
+ }
+
+ if uint64(len(data)) > math.MaxUint32 {
+ return fmt.Errorf("data section %s: contents exceed maximum size", sec.Name)
+ }
+ mapSpec.Contents = []MapKV{{uint32(0), data}}
+
+ case elf.SHT_NOBITS:
+ // NOBITS sections like .bss contain only zeroes, and since data sections
+ // are Arrays, the kernel already preallocates them. Skip reading zeroes
+ // from the ELF.
+ default:
+ return fmt.Errorf("data section %s: unknown section type %s", sec.Name, sec.Type)
+ }
+
+ // It is possible for a data section to exist without a corresponding BTF Datasec
+ // if it only contains anonymous values like macro-defined arrays.
+ if ec.btf != nil {
+ var ds *btf.Datasec
+ if ec.btf.TypeByName(sec.Name, &ds) == nil {
+ // Assign the spec's key and BTF only if the Datasec lookup was successful.
+ mapSpec.Key = &btf.Void{}
+ mapSpec.Value = ds
+ }
+ }
+
+ if strings.HasPrefix(sec.Name, ".rodata") {
+ mapSpec.Flags = unix.BPF_F_RDONLY_PROG
+ mapSpec.Freeze = true
+ }
+
+ ec.maps[sec.Name] = mapSpec
+ }
+
+ return nil
+}
+
+// loadKconfigSection handles the 'virtual' Datasec .kconfig that doesn't
+// have a corresponding ELF section and exists purely in BTF.
+func (ec *elfCode) loadKconfigSection() error {
+ if ec.btf == nil {
+ return nil
+ }
+
+ var ds *btf.Datasec
+ err := ec.btf.TypeByName(".kconfig", &ds)
+ if errors.Is(err, btf.ErrNotFound) {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ if ds.Size == 0 {
+ return errors.New("zero-length .kconfig")
+ }
+
+ ec.kconfig = &MapSpec{
+ Name: ".kconfig",
+ Type: Array,
+ KeySize: uint32(4),
+ ValueSize: ds.Size,
+ MaxEntries: 1,
+ Flags: unix.BPF_F_RDONLY_PROG,
+ Freeze: true,
+ Key: &btf.Int{Size: 4},
+ Value: ds,
+ }
+
+ return nil
+}
+
+// loadKsymsSection handles the 'virtual' Datasec .ksyms that doesn't
+// have a corresponding ELF section and exists purely in BTF.
+func (ec *elfCode) loadKsymsSection() error {
+ if ec.btf == nil {
+ return nil
+ }
+
+ var ds *btf.Datasec
+ err := ec.btf.TypeByName(".ksyms", &ds)
+ if errors.Is(err, btf.ErrNotFound) {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ for _, v := range ds.Vars {
+ // we have already checked the .ksyms Datasec to only contain Func Vars.
+ ec.kfuncs[v.Type.TypeName()] = v.Type.(*btf.Func)
+ }
+
+ return nil
+}
+
+func getProgType(sectionName string) (ProgramType, AttachType, uint32, string) {
+ types := []struct {
+ prefix string
+ progType ProgramType
+ attachType AttachType
+ progFlags uint32
+ }{
+ // Please update the types from libbpf.c and follow the order of it.
+ // https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/lib/bpf/libbpf.c
+ {"socket", SocketFilter, AttachNone, 0},
+ {"sk_reuseport/migrate", SkReuseport, AttachSkReuseportSelectOrMigrate, 0},
+ {"sk_reuseport", SkReuseport, AttachSkReuseportSelect, 0},
+ {"kprobe/", Kprobe, AttachNone, 0},
+ {"uprobe/", Kprobe, AttachNone, 0},
+ {"kretprobe/", Kprobe, AttachNone, 0},
+ {"uretprobe/", Kprobe, AttachNone, 0},
+ {"tc", SchedCLS, AttachNone, 0},
+ {"classifier", SchedCLS, AttachNone, 0},
+ {"action", SchedACT, AttachNone, 0},
+ {"tracepoint/", TracePoint, AttachNone, 0},
+ {"tp/", TracePoint, AttachNone, 0},
+ {"raw_tracepoint/", RawTracepoint, AttachNone, 0},
+ {"raw_tp/", RawTracepoint, AttachNone, 0},
+ {"raw_tracepoint.w/", RawTracepointWritable, AttachNone, 0},
+ {"raw_tp.w/", RawTracepointWritable, AttachNone, 0},
+ {"tp_btf/", Tracing, AttachTraceRawTp, 0},
+ {"fentry/", Tracing, AttachTraceFEntry, 0},
+ {"fmod_ret/", Tracing, AttachModifyReturn, 0},
+ {"fexit/", Tracing, AttachTraceFExit, 0},
+ {"fentry.s/", Tracing, AttachTraceFEntry, unix.BPF_F_SLEEPABLE},
+ {"fmod_ret.s/", Tracing, AttachModifyReturn, unix.BPF_F_SLEEPABLE},
+ {"fexit.s/", Tracing, AttachTraceFExit, unix.BPF_F_SLEEPABLE},
+ {"freplace/", Extension, AttachNone, 0},
+ {"lsm/", LSM, AttachLSMMac, 0},
+ {"lsm.s/", LSM, AttachLSMMac, unix.BPF_F_SLEEPABLE},
+ {"iter/", Tracing, AttachTraceIter, 0},
+ {"iter.s/", Tracing, AttachTraceIter, unix.BPF_F_SLEEPABLE},
+ {"syscall", Syscall, AttachNone, 0},
+ {"xdp.frags_devmap/", XDP, AttachXDPDevMap, unix.BPF_F_XDP_HAS_FRAGS},
+ {"xdp_devmap/", XDP, AttachXDPDevMap, 0},
+ {"xdp.frags_cpumap/", XDP, AttachXDPCPUMap, unix.BPF_F_XDP_HAS_FRAGS},
+ {"xdp_cpumap/", XDP, AttachXDPCPUMap, 0},
+ {"xdp.frags", XDP, AttachNone, unix.BPF_F_XDP_HAS_FRAGS},
+ {"xdp", XDP, AttachNone, 0},
+ {"perf_event", PerfEvent, AttachNone, 0},
+ {"lwt_in", LWTIn, AttachNone, 0},
+ {"lwt_out", LWTOut, AttachNone, 0},
+ {"lwt_xmit", LWTXmit, AttachNone, 0},
+ {"lwt_seg6local", LWTSeg6Local, AttachNone, 0},
+ {"cgroup_skb/ingress", CGroupSKB, AttachCGroupInetIngress, 0},
+ {"cgroup_skb/egress", CGroupSKB, AttachCGroupInetEgress, 0},
+ {"cgroup/skb", CGroupSKB, AttachNone, 0},
+ {"cgroup/sock_create", CGroupSock, AttachCGroupInetSockCreate, 0},
+ {"cgroup/sock_release", CGroupSock, AttachCgroupInetSockRelease, 0},
+ {"cgroup/sock", CGroupSock, AttachCGroupInetSockCreate, 0},
+ {"cgroup/post_bind4", CGroupSock, AttachCGroupInet4PostBind, 0},
+ {"cgroup/post_bind6", CGroupSock, AttachCGroupInet6PostBind, 0},
+ {"cgroup/dev", CGroupDevice, AttachCGroupDevice, 0},
+ {"sockops", SockOps, AttachCGroupSockOps, 0},
+ {"sk_skb/stream_parser", SkSKB, AttachSkSKBStreamParser, 0},
+ {"sk_skb/stream_verdict", SkSKB, AttachSkSKBStreamVerdict, 0},
+ {"sk_skb", SkSKB, AttachNone, 0},
+ {"sk_msg", SkMsg, AttachSkMsgVerdict, 0},
+ {"lirc_mode2", LircMode2, AttachLircMode2, 0},
+ {"flow_dissector", FlowDissector, AttachFlowDissector, 0},
+ {"cgroup/bind4", CGroupSockAddr, AttachCGroupInet4Bind, 0},
+ {"cgroup/bind6", CGroupSockAddr, AttachCGroupInet6Bind, 0},
+ {"cgroup/connect4", CGroupSockAddr, AttachCGroupInet4Connect, 0},
+ {"cgroup/connect6", CGroupSockAddr, AttachCGroupInet6Connect, 0},
+ {"cgroup/sendmsg4", CGroupSockAddr, AttachCGroupUDP4Sendmsg, 0},
+ {"cgroup/sendmsg6", CGroupSockAddr, AttachCGroupUDP6Sendmsg, 0},
+ {"cgroup/recvmsg4", CGroupSockAddr, AttachCGroupUDP4Recvmsg, 0},
+ {"cgroup/recvmsg6", CGroupSockAddr, AttachCGroupUDP6Recvmsg, 0},
+ {"cgroup/getpeername4", CGroupSockAddr, AttachCgroupInet4GetPeername, 0},
+ {"cgroup/getpeername6", CGroupSockAddr, AttachCgroupInet6GetPeername, 0},
+ {"cgroup/getsockname4", CGroupSockAddr, AttachCgroupInet4GetSockname, 0},
+ {"cgroup/getsockname6", CGroupSockAddr, AttachCgroupInet6GetSockname, 0},
+ {"cgroup/sysctl", CGroupSysctl, AttachCGroupSysctl, 0},
+ {"cgroup/getsockopt", CGroupSockopt, AttachCGroupGetsockopt, 0},
+ {"cgroup/setsockopt", CGroupSockopt, AttachCGroupSetsockopt, 0},
+ {"struct_ops+", StructOps, AttachNone, 0},
+ {"sk_lookup/", SkLookup, AttachSkLookup, 0},
+ {"seccomp", SocketFilter, AttachNone, 0},
+ {"kprobe.multi", Kprobe, AttachTraceKprobeMulti, 0},
+ {"kretprobe.multi", Kprobe, AttachTraceKprobeMulti, 0},
+ // Document all prefixes in docs/ebpf/concepts/elf-sections.md.
+ }
+
+ for _, t := range types {
+ if !strings.HasPrefix(sectionName, t.prefix) {
+ continue
+ }
+
+ if !strings.HasSuffix(t.prefix, "/") {
+ return t.progType, t.attachType, t.progFlags, ""
+ }
+
+ return t.progType, t.attachType, t.progFlags, sectionName[len(t.prefix):]
+ }
+
+ return UnspecifiedProgram, AttachNone, 0, ""
+}
+
+func (ec *elfCode) loadSectionRelocations(sec *elf.Section, symbols []elf.Symbol) (map[uint64]elf.Symbol, error) {
+ rels := make(map[uint64]elf.Symbol)
+
+ if sec.Entsize < 16 {
+ return nil, fmt.Errorf("section %s: relocations are less than 16 bytes", sec.Name)
+ }
+
+ r := bufio.NewReader(sec.Open())
+ for off := uint64(0); off < sec.Size; off += sec.Entsize {
+ ent := io.LimitReader(r, int64(sec.Entsize))
+
+ var rel elf.Rel64
+ if binary.Read(ent, ec.ByteOrder, &rel) != nil {
+ return nil, fmt.Errorf("can't parse relocation at offset %v", off)
+ }
+
+ symNo := int(elf.R_SYM64(rel.Info) - 1)
+ if symNo >= len(symbols) {
+ return nil, fmt.Errorf("offset %d: symbol %d doesn't exist", off, symNo)
+ }
+
+ symbol := symbols[symNo]
+ rels[rel.Off] = symbol
+ }
+
+ return rels, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/features/doc.go b/vendor/github.com/cilium/ebpf/features/doc.go
new file mode 100644
index 000000000..acc57e3b1
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/features/doc.go
@@ -0,0 +1,19 @@
+// Package features allows probing for BPF features available to the calling process.
+//
+// In general, the error return values from feature probes in this package
+// all have the following semantics unless otherwise specified:
+//
+// err == nil: The feature is available.
+// errors.Is(err, ebpf.ErrNotSupported): The feature is not available.
+// err != nil: Any errors encountered during probe execution, wrapped.
+//
+// Note that the latter case may include false negatives, and that resource
+// creation may succeed despite an error being returned. For example, some
+// map and program types cannot reliably be probed and will return an
+// inconclusive error.
+//
+// As a rule, only `nil` and `ebpf.ErrNotSupported` are conclusive.
+//
+// Probe results are cached by the library and persist throughout any changes
+// to the process' environment, like capability changes.
+package features
diff --git a/vendor/github.com/cilium/ebpf/features/map.go b/vendor/github.com/cilium/ebpf/features/map.go
new file mode 100644
index 000000000..8923e736a
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/features/map.go
@@ -0,0 +1,321 @@
+package features
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "unsafe"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// HaveMapType probes the running kernel for the availability of the specified map type.
+//
+// See the package documentation for the meaning of the error return value.
+func HaveMapType(mt ebpf.MapType) error {
+ return haveMapTypeMatrix.Result(mt)
+}
+
+func probeCgroupStorageMap(mt sys.MapType) error {
+ // keySize needs to be sizeof(struct{u32 + u64}) = 12 (+ padding = 16)
+ // by using unsafe.Sizeof(int) we are making sure that this works on 32bit and 64bit archs
+ return createMap(&sys.MapCreateAttr{
+ MapType: mt,
+ ValueSize: 4,
+ KeySize: uint32(8 + unsafe.Sizeof(int(0))),
+ MaxEntries: 0,
+ })
+}
+
+func probeStorageMap(mt sys.MapType) error {
+ // maxEntries needs to be 0
+ // BPF_F_NO_PREALLOC needs to be set
+ // btf* fields need to be set
+ // see alloc_check for local_storage map types
+ err := createMap(&sys.MapCreateAttr{
+ MapType: mt,
+ KeySize: 4,
+ ValueSize: 4,
+ MaxEntries: 0,
+ MapFlags: unix.BPF_F_NO_PREALLOC,
+ BtfKeyTypeId: 1,
+ BtfValueTypeId: 1,
+ BtfFd: ^uint32(0),
+ })
+ if errors.Is(err, unix.EBADF) {
+ // Triggered by BtfFd.
+ return nil
+ }
+ return err
+}
+
+func probeNestedMap(mt sys.MapType) error {
+ // assign invalid innerMapFd to pass validation check
+ // will return EBADF
+ err := probeMap(&sys.MapCreateAttr{
+ MapType: mt,
+ InnerMapFd: ^uint32(0),
+ })
+ if errors.Is(err, unix.EBADF) {
+ return nil
+ }
+ return err
+}
+
+func probeMap(attr *sys.MapCreateAttr) error {
+ if attr.KeySize == 0 {
+ attr.KeySize = 4
+ }
+ if attr.ValueSize == 0 {
+ attr.ValueSize = 4
+ }
+ attr.MaxEntries = 1
+ return createMap(attr)
+}
+
+func createMap(attr *sys.MapCreateAttr) error {
+ fd, err := sys.MapCreate(attr)
+ if err == nil {
+ fd.Close()
+ return nil
+ }
+
+ switch {
+ // EINVAL occurs when attempting to create a map with an unknown type.
+ // E2BIG occurs when MapCreateAttr contains non-zero bytes past the end
+ // of the struct known by the running kernel, meaning the kernel is too old
+ // to support the given map type.
+ case errors.Is(err, unix.EINVAL), errors.Is(err, unix.E2BIG):
+ return ebpf.ErrNotSupported
+ }
+
+ return err
+}
+
+var haveMapTypeMatrix = internal.FeatureMatrix[ebpf.MapType]{
+ ebpf.Hash: {Version: "3.19"},
+ ebpf.Array: {Version: "3.19"},
+ ebpf.ProgramArray: {Version: "4.2"},
+ ebpf.PerfEventArray: {Version: "4.3"},
+ ebpf.PerCPUHash: {Version: "4.6"},
+ ebpf.PerCPUArray: {Version: "4.6"},
+ ebpf.StackTrace: {
+ Version: "4.6",
+ Fn: func() error {
+ return probeMap(&sys.MapCreateAttr{
+ MapType: sys.BPF_MAP_TYPE_STACK_TRACE,
+ ValueSize: 8, // sizeof(uint64)
+ })
+ },
+ },
+ ebpf.CGroupArray: {Version: "4.8"},
+ ebpf.LRUHash: {Version: "4.10"},
+ ebpf.LRUCPUHash: {Version: "4.10"},
+ ebpf.LPMTrie: {
+ Version: "4.11",
+ Fn: func() error {
+ // keySize and valueSize need to be sizeof(struct{u32 + u8}) + 1 + padding = 8
+ // BPF_F_NO_PREALLOC needs to be set
+ return probeMap(&sys.MapCreateAttr{
+ MapType: sys.BPF_MAP_TYPE_LPM_TRIE,
+ KeySize: 8,
+ ValueSize: 8,
+ MapFlags: unix.BPF_F_NO_PREALLOC,
+ })
+ },
+ },
+ ebpf.ArrayOfMaps: {
+ Version: "4.12",
+ Fn: func() error { return probeNestedMap(sys.BPF_MAP_TYPE_ARRAY_OF_MAPS) },
+ },
+ ebpf.HashOfMaps: {
+ Version: "4.12",
+ Fn: func() error { return probeNestedMap(sys.BPF_MAP_TYPE_HASH_OF_MAPS) },
+ },
+ ebpf.DevMap: {Version: "4.14"},
+ ebpf.SockMap: {Version: "4.14"},
+ ebpf.CPUMap: {Version: "4.15"},
+ ebpf.XSKMap: {Version: "4.18"},
+ ebpf.SockHash: {Version: "4.18"},
+ ebpf.CGroupStorage: {
+ Version: "4.19",
+ Fn: func() error { return probeCgroupStorageMap(sys.BPF_MAP_TYPE_CGROUP_STORAGE) },
+ },
+ ebpf.ReusePortSockArray: {Version: "4.19"},
+ ebpf.PerCPUCGroupStorage: {
+ Version: "4.20",
+ Fn: func() error { return probeCgroupStorageMap(sys.BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) },
+ },
+ ebpf.Queue: {
+ Version: "4.20",
+ Fn: func() error {
+ return createMap(&sys.MapCreateAttr{
+ MapType: sys.BPF_MAP_TYPE_QUEUE,
+ KeySize: 0,
+ ValueSize: 4,
+ MaxEntries: 1,
+ })
+ },
+ },
+ ebpf.Stack: {
+ Version: "4.20",
+ Fn: func() error {
+ return createMap(&sys.MapCreateAttr{
+ MapType: sys.BPF_MAP_TYPE_STACK,
+ KeySize: 0,
+ ValueSize: 4,
+ MaxEntries: 1,
+ })
+ },
+ },
+ ebpf.SkStorage: {
+ Version: "5.2",
+ Fn: func() error { return probeStorageMap(sys.BPF_MAP_TYPE_SK_STORAGE) },
+ },
+ ebpf.DevMapHash: {Version: "5.4"},
+ ebpf.StructOpsMap: {
+ Version: "5.6",
+ Fn: func() error {
+ // StructOps requires setting a vmlinux type id, but id 1 will always
+ // resolve to some type of integer. This will cause ENOTSUPP.
+ err := probeMap(&sys.MapCreateAttr{
+ MapType: sys.BPF_MAP_TYPE_STRUCT_OPS,
+ BtfVmlinuxValueTypeId: 1,
+ })
+ if errors.Is(err, sys.ENOTSUPP) {
+ // ENOTSUPP means the map type is at least known to the kernel.
+ return nil
+ }
+ return err
+ },
+ },
+ ebpf.RingBuf: {
+ Version: "5.8",
+ Fn: func() error {
+ // keySize and valueSize need to be 0
+ // maxEntries needs to be power of 2 and PAGE_ALIGNED
+ return createMap(&sys.MapCreateAttr{
+ MapType: sys.BPF_MAP_TYPE_RINGBUF,
+ KeySize: 0,
+ ValueSize: 0,
+ MaxEntries: uint32(os.Getpagesize()),
+ })
+ },
+ },
+ ebpf.InodeStorage: {
+ Version: "5.10",
+ Fn: func() error { return probeStorageMap(sys.BPF_MAP_TYPE_INODE_STORAGE) },
+ },
+ ebpf.TaskStorage: {
+ Version: "5.11",
+ Fn: func() error { return probeStorageMap(sys.BPF_MAP_TYPE_TASK_STORAGE) },
+ },
+}
+
+func init() {
+ for mt, ft := range haveMapTypeMatrix {
+ ft.Name = mt.String()
+ if ft.Fn == nil {
+ // Avoid referring to the loop variable in the closure.
+ mt := sys.MapType(mt)
+ ft.Fn = func() error { return probeMap(&sys.MapCreateAttr{MapType: mt}) }
+ }
+ }
+}
+
+// MapFlags document which flags may be feature probed.
+type MapFlags = sys.MapFlags
+
+// Flags which may be feature probed.
+const (
+ BPF_F_NO_PREALLOC = sys.BPF_F_NO_PREALLOC
+ BPF_F_RDONLY_PROG = sys.BPF_F_RDONLY_PROG
+ BPF_F_WRONLY_PROG = sys.BPF_F_WRONLY_PROG
+ BPF_F_MMAPABLE = sys.BPF_F_MMAPABLE
+ BPF_F_INNER_MAP = sys.BPF_F_INNER_MAP
+)
+
+// HaveMapFlag probes the running kernel for the availability of the specified map flag.
+//
+// Returns an error if flag is not one of the flags declared in this package.
+// See the package documentation for the meaning of the error return value.
+func HaveMapFlag(flag MapFlags) (err error) {
+ return haveMapFlagsMatrix.Result(flag)
+}
+
+func probeMapFlag(attr *sys.MapCreateAttr) error {
+ // For now, we do not check if the map type is supported because we only support
+ // probing for flags defined on arrays and hashes that are always supported.
+ // In the future, if we allow probing on flags defined on newer types, checking for map type
+ // support will be required.
+ if attr.MapType == sys.BPF_MAP_TYPE_UNSPEC {
+ attr.MapType = sys.BPF_MAP_TYPE_ARRAY
+ }
+
+ attr.KeySize = 4
+ attr.ValueSize = 4
+ attr.MaxEntries = 1
+
+ fd, err := sys.MapCreate(attr)
+ if err == nil {
+ fd.Close()
+ } else if errors.Is(err, unix.EINVAL) {
+ // EINVAL occurs when attempting to create a map with an unknown type or an unknown flag.
+ err = ebpf.ErrNotSupported
+ }
+
+ return err
+}
+
+var haveMapFlagsMatrix = internal.FeatureMatrix[MapFlags]{
+ BPF_F_NO_PREALLOC: {
+ Version: "4.6",
+ Fn: func() error {
+ return probeMapFlag(&sys.MapCreateAttr{
+ MapType: sys.BPF_MAP_TYPE_HASH,
+ MapFlags: BPF_F_NO_PREALLOC,
+ })
+ },
+ },
+ BPF_F_RDONLY_PROG: {
+ Version: "5.2",
+ Fn: func() error {
+ return probeMapFlag(&sys.MapCreateAttr{
+ MapFlags: BPF_F_RDONLY_PROG,
+ })
+ },
+ },
+ BPF_F_WRONLY_PROG: {
+ Version: "5.2",
+ Fn: func() error {
+ return probeMapFlag(&sys.MapCreateAttr{
+ MapFlags: BPF_F_WRONLY_PROG,
+ })
+ },
+ },
+ BPF_F_MMAPABLE: {
+ Version: "5.5",
+ Fn: func() error {
+ return probeMapFlag(&sys.MapCreateAttr{
+ MapFlags: BPF_F_MMAPABLE,
+ })
+ },
+ },
+ BPF_F_INNER_MAP: {
+ Version: "5.10",
+ Fn: func() error {
+ return probeMapFlag(&sys.MapCreateAttr{
+ MapFlags: BPF_F_INNER_MAP,
+ })
+ },
+ },
+}
+
+func init() {
+ for mf, ft := range haveMapFlagsMatrix {
+ ft.Name = fmt.Sprint(mf)
+ }
+}
diff --git a/vendor/github.com/cilium/ebpf/features/misc.go b/vendor/github.com/cilium/ebpf/features/misc.go
new file mode 100644
index 000000000..de07d3801
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/features/misc.go
@@ -0,0 +1,79 @@
+package features
+
+import (
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/internal"
+)
+
+// HaveLargeInstructions probes the running kernel if more than 4096 instructions
+// per program are supported.
+//
+// Upstream commit c04c0d2b968a ("bpf: increase complexity limit and maximum program size").
+//
+// See the package documentation for the meaning of the error return value.
+var HaveLargeInstructions = internal.NewFeatureTest(">4096 instructions", "5.2", func() error {
+ const maxInsns = 4096
+
+ insns := make(asm.Instructions, maxInsns, maxInsns+1)
+ for i := range insns {
+ insns[i] = asm.Mov.Imm(asm.R0, 1)
+ }
+ insns = append(insns, asm.Return())
+
+ return probeProgram(&ebpf.ProgramSpec{
+ Type: ebpf.SocketFilter,
+ Instructions: insns,
+ })
+})
+
+// HaveBoundedLoops probes the running kernel if bounded loops are supported.
+//
+// Upstream commit 2589726d12a1 ("bpf: introduce bounded loops").
+//
+// See the package documentation for the meaning of the error return value.
+var HaveBoundedLoops = internal.NewFeatureTest("bounded loops", "5.3", func() error {
+ return probeProgram(&ebpf.ProgramSpec{
+ Type: ebpf.SocketFilter,
+ Instructions: asm.Instructions{
+ asm.Mov.Imm(asm.R0, 10),
+ asm.Sub.Imm(asm.R0, 1).WithSymbol("loop"),
+ asm.JNE.Imm(asm.R0, 0, "loop"),
+ asm.Return(),
+ },
+ })
+})
+
+// HaveV2ISA probes the running kernel if instructions of the v2 ISA are supported.
+//
+// Upstream commit 92b31a9af73b ("bpf: add BPF_J{LT,LE,SLT,SLE} instructions").
+//
+// See the package documentation for the meaning of the error return value.
+var HaveV2ISA = internal.NewFeatureTest("v2 ISA", "4.14", func() error {
+ return probeProgram(&ebpf.ProgramSpec{
+ Type: ebpf.SocketFilter,
+ Instructions: asm.Instructions{
+ asm.Mov.Imm(asm.R0, 0),
+ asm.JLT.Imm(asm.R0, 0, "exit"),
+ asm.Mov.Imm(asm.R0, 1),
+ asm.Return().WithSymbol("exit"),
+ },
+ })
+})
+
+// HaveV3ISA probes the running kernel if instructions of the v3 ISA are supported.
+//
+// Upstream commit 092ed0968bb6 ("bpf: verifier support JMP32").
+//
+// See the package documentation for the meaning of the error return value.
+var HaveV3ISA = internal.NewFeatureTest("v3 ISA", "5.1", func() error {
+ return probeProgram(&ebpf.ProgramSpec{
+ Type: ebpf.SocketFilter,
+ Instructions: asm.Instructions{
+ asm.Mov.Imm(asm.R0, 0),
+ asm.JLT.Imm32(asm.R0, 0, "exit"),
+ asm.Mov.Imm(asm.R0, 1),
+ asm.Return().WithSymbol("exit"),
+ },
+ })
+})
diff --git a/vendor/github.com/cilium/ebpf/features/prog.go b/vendor/github.com/cilium/ebpf/features/prog.go
new file mode 100644
index 000000000..a11363796
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/features/prog.go
@@ -0,0 +1,297 @@
+package features
+
+import (
+ "errors"
+ "fmt"
+ "os"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/btf"
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// HaveProgType probes the running kernel for the availability of the specified program type.
+//
+// Deprecated: use HaveProgramType() instead.
+var HaveProgType = HaveProgramType
+
+// HaveProgramType probes the running kernel for the availability of the specified program type.
+//
+// See the package documentation for the meaning of the error return value.
+func HaveProgramType(pt ebpf.ProgramType) (err error) {
+ return haveProgramTypeMatrix.Result(pt)
+}
+
+func probeProgram(spec *ebpf.ProgramSpec) error {
+ if spec.Instructions == nil {
+ spec.Instructions = asm.Instructions{
+ asm.LoadImm(asm.R0, 0, asm.DWord),
+ asm.Return(),
+ }
+ }
+ prog, err := ebpf.NewProgramWithOptions(spec, ebpf.ProgramOptions{
+ LogDisabled: true,
+ })
+ if err == nil {
+ prog.Close()
+ }
+
+ switch {
+ // EINVAL occurs when attempting to create a program with an unknown type.
+ // E2BIG occurs when ProgLoadAttr contains non-zero bytes past the end
+ // of the struct known by the running kernel, meaning the kernel is too old
+ // to support the given prog type.
+ case errors.Is(err, unix.EINVAL), errors.Is(err, unix.E2BIG):
+ err = ebpf.ErrNotSupported
+ }
+
+ return err
+}
+
+var haveProgramTypeMatrix = internal.FeatureMatrix[ebpf.ProgramType]{
+ ebpf.SocketFilter: {Version: "3.19"},
+ ebpf.Kprobe: {Version: "4.1"},
+ ebpf.SchedCLS: {Version: "4.1"},
+ ebpf.SchedACT: {Version: "4.1"},
+ ebpf.TracePoint: {Version: "4.7"},
+ ebpf.XDP: {Version: "4.8"},
+ ebpf.PerfEvent: {Version: "4.9"},
+ ebpf.CGroupSKB: {Version: "4.10"},
+ ebpf.CGroupSock: {Version: "4.10"},
+ ebpf.LWTIn: {Version: "4.10"},
+ ebpf.LWTOut: {Version: "4.10"},
+ ebpf.LWTXmit: {Version: "4.10"},
+ ebpf.SockOps: {Version: "4.13"},
+ ebpf.SkSKB: {Version: "4.14"},
+ ebpf.CGroupDevice: {Version: "4.15"},
+ ebpf.SkMsg: {Version: "4.17"},
+ ebpf.RawTracepoint: {Version: "4.17"},
+ ebpf.CGroupSockAddr: {
+ Version: "4.17",
+ Fn: func() error {
+ return probeProgram(&ebpf.ProgramSpec{
+ Type: ebpf.CGroupSockAddr,
+ AttachType: ebpf.AttachCGroupInet4Connect,
+ })
+ },
+ },
+ ebpf.LWTSeg6Local: {Version: "4.18"},
+ ebpf.LircMode2: {Version: "4.18"},
+ ebpf.SkReuseport: {Version: "4.19"},
+ ebpf.FlowDissector: {Version: "4.20"},
+ ebpf.CGroupSysctl: {Version: "5.2"},
+ ebpf.RawTracepointWritable: {Version: "5.2"},
+ ebpf.CGroupSockopt: {
+ Version: "5.3",
+ Fn: func() error {
+ return probeProgram(&ebpf.ProgramSpec{
+ Type: ebpf.CGroupSockopt,
+ AttachType: ebpf.AttachCGroupGetsockopt,
+ })
+ },
+ },
+ ebpf.Tracing: {
+ Version: "5.5",
+ Fn: func() error {
+ return probeProgram(&ebpf.ProgramSpec{
+ Type: ebpf.Tracing,
+ AttachType: ebpf.AttachTraceFEntry,
+ AttachTo: "bpf_init",
+ })
+ },
+ },
+ ebpf.StructOps: {
+ Version: "5.6",
+ Fn: func() error {
+ err := probeProgram(&ebpf.ProgramSpec{
+ Type: ebpf.StructOps,
+ License: "GPL",
+ })
+ if errors.Is(err, sys.ENOTSUPP) {
+ // ENOTSUPP means the program type is at least known to the kernel.
+ return nil
+ }
+ return err
+ },
+ },
+ ebpf.Extension: {
+ Version: "5.6",
+ Fn: func() error {
+ // create btf.Func to add to first ins of target and extension so both progs are btf powered
+ btfFn := btf.Func{
+ Name: "a",
+ Type: &btf.FuncProto{
+ Return: &btf.Int{},
+ },
+ Linkage: btf.GlobalFunc,
+ }
+ insns := asm.Instructions{
+ btf.WithFuncMetadata(asm.Mov.Imm(asm.R0, 0), &btfFn),
+ asm.Return(),
+ }
+
+ // create target prog
+ prog, err := ebpf.NewProgramWithOptions(
+ &ebpf.ProgramSpec{
+ Type: ebpf.XDP,
+ Instructions: insns,
+ },
+ ebpf.ProgramOptions{
+ LogDisabled: true,
+ },
+ )
+ if err != nil {
+ return err
+ }
+ defer prog.Close()
+
+ // probe for Extension prog with target
+ return probeProgram(&ebpf.ProgramSpec{
+ Type: ebpf.Extension,
+ Instructions: insns,
+ AttachTarget: prog,
+ AttachTo: btfFn.Name,
+ })
+ },
+ },
+ ebpf.LSM: {
+ Version: "5.7",
+ Fn: func() error {
+ return probeProgram(&ebpf.ProgramSpec{
+ Type: ebpf.LSM,
+ AttachType: ebpf.AttachLSMMac,
+ AttachTo: "file_mprotect",
+ License: "GPL",
+ })
+ },
+ },
+ ebpf.SkLookup: {
+ Version: "5.9",
+ Fn: func() error {
+ return probeProgram(&ebpf.ProgramSpec{
+ Type: ebpf.SkLookup,
+ AttachType: ebpf.AttachSkLookup,
+ })
+ },
+ },
+ ebpf.Syscall: {
+ Version: "5.14",
+ Fn: func() error {
+ return probeProgram(&ebpf.ProgramSpec{
+ Type: ebpf.Syscall,
+ Flags: unix.BPF_F_SLEEPABLE,
+ })
+ },
+ },
+}
+
+func init() {
+ for key, ft := range haveProgramTypeMatrix {
+ ft.Name = key.String()
+ if ft.Fn == nil {
+ key := key // avoid the dreaded loop variable problem
+ ft.Fn = func() error { return probeProgram(&ebpf.ProgramSpec{Type: key}) }
+ }
+ }
+}
+
+type helperKey struct {
+ typ ebpf.ProgramType
+ helper asm.BuiltinFunc
+}
+
+var helperCache = internal.NewFeatureCache(func(key helperKey) *internal.FeatureTest {
+ return &internal.FeatureTest{
+ Name: fmt.Sprintf("%s for program type %s", key.helper, key.typ),
+ Fn: func() error {
+ return haveProgramHelper(key.typ, key.helper)
+ },
+ }
+})
+
+// HaveProgramHelper probes the running kernel for the availability of the specified helper
+// function to a specified program type.
+// Return values have the following semantics:
+//
+// err == nil: The feature is available.
+// errors.Is(err, ebpf.ErrNotSupported): The feature is not available.
+// err != nil: Any errors encountered during probe execution, wrapped.
+//
+// Note that the latter case may include false negatives, and that program creation may
+// succeed despite an error being returned.
+// Only `nil` and `ebpf.ErrNotSupported` are conclusive.
+//
+// Probe results are cached and persist throughout any process capability changes.
+func HaveProgramHelper(pt ebpf.ProgramType, helper asm.BuiltinFunc) error {
+ if helper > helper.Max() {
+ return os.ErrInvalid
+ }
+
+ return helperCache.Result(helperKey{pt, helper})
+}
+
+func haveProgramHelper(pt ebpf.ProgramType, helper asm.BuiltinFunc) error {
+ if ok := helperProbeNotImplemented(pt); ok {
+ return fmt.Errorf("no feature probe for %v/%v", pt, helper)
+ }
+
+ if err := HaveProgramType(pt); err != nil {
+ return err
+ }
+
+ spec := &ebpf.ProgramSpec{
+ Type: pt,
+ Instructions: asm.Instructions{
+ helper.Call(),
+ asm.LoadImm(asm.R0, 0, asm.DWord),
+ asm.Return(),
+ },
+ License: "GPL",
+ }
+
+ switch pt {
+ case ebpf.CGroupSockAddr:
+ spec.AttachType = ebpf.AttachCGroupInet4Connect
+ case ebpf.CGroupSockopt:
+ spec.AttachType = ebpf.AttachCGroupGetsockopt
+ case ebpf.SkLookup:
+ spec.AttachType = ebpf.AttachSkLookup
+ case ebpf.Syscall:
+ spec.Flags = unix.BPF_F_SLEEPABLE
+ }
+
+ prog, err := ebpf.NewProgramWithOptions(spec, ebpf.ProgramOptions{
+ LogDisabled: true,
+ })
+ if err == nil {
+ prog.Close()
+ }
+
+ switch {
+ // EACCES occurs when attempting to create a program probe with a helper
+ // while the register args when calling this helper aren't set up properly.
+ // We interpret this as the helper being available, because the verifier
+ // returns EINVAL if the helper is not supported by the running kernel.
+ case errors.Is(err, unix.EACCES):
+ // TODO: possibly we need to check verifier output here to be sure
+ err = nil
+
+ // EINVAL occurs when attempting to create a program with an unknown helper.
+ case errors.Is(err, unix.EINVAL):
+ // TODO: possibly we need to check verifier output here to be sure
+ err = ebpf.ErrNotSupported
+ }
+
+ return err
+}
+
+func helperProbeNotImplemented(pt ebpf.ProgramType) bool {
+ switch pt {
+ case ebpf.Extension, ebpf.LSM, ebpf.StructOps, ebpf.Tracing:
+ return true
+ }
+ return false
+}
diff --git a/vendor/github.com/cilium/ebpf/features/version.go b/vendor/github.com/cilium/ebpf/features/version.go
new file mode 100644
index 000000000..69e1c39c1
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/features/version.go
@@ -0,0 +1,18 @@
+package features
+
+import "github.com/cilium/ebpf/internal"
+
+// LinuxVersionCode returns the version of the currently running kernel
+// as defined in the LINUX_VERSION_CODE compile-time macro. It is represented
+// in the format described by the KERNEL_VERSION macro from linux/version.h.
+//
+// Do not use the version to make assumptions about the presence of certain
+// kernel features, always prefer feature probes in this package. Some
+// distributions backport or disable eBPF features.
+func LinuxVersionCode() (uint32, error) {
+ v, err := internal.KernelVersion()
+ if err != nil {
+ return 0, err
+ }
+ return v.Kernel(), nil
+}
diff --git a/vendor/github.com/cilium/ebpf/info.go b/vendor/github.com/cilium/ebpf/info.go
new file mode 100644
index 000000000..79b11c951
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/info.go
@@ -0,0 +1,453 @@
+package ebpf
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "syscall"
+ "time"
+ "unsafe"
+
+ "github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/btf"
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// MapInfo describes a map.
+type MapInfo struct {
+ Type MapType
+ id MapID
+ KeySize uint32
+ ValueSize uint32
+ MaxEntries uint32
+ Flags uint32
+ // Name as supplied by user space at load time. Available from 4.15.
+ Name string
+}
+
+func newMapInfoFromFd(fd *sys.FD) (*MapInfo, error) {
+ var info sys.MapInfo
+ err := sys.ObjInfo(fd, &info)
+ if errors.Is(err, syscall.EINVAL) {
+ return newMapInfoFromProc(fd)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ return &MapInfo{
+ MapType(info.Type),
+ MapID(info.Id),
+ info.KeySize,
+ info.ValueSize,
+ info.MaxEntries,
+ uint32(info.MapFlags),
+ unix.ByteSliceToString(info.Name[:]),
+ }, nil
+}
+
+func newMapInfoFromProc(fd *sys.FD) (*MapInfo, error) {
+ var mi MapInfo
+ err := scanFdInfo(fd, map[string]interface{}{
+ "map_type": &mi.Type,
+ "key_size": &mi.KeySize,
+ "value_size": &mi.ValueSize,
+ "max_entries": &mi.MaxEntries,
+ "map_flags": &mi.Flags,
+ })
+ if err != nil {
+ return nil, err
+ }
+ return &mi, nil
+}
+
+// ID returns the map ID.
+//
+// Available from 4.13.
+//
+// The bool return value indicates whether this optional field is available.
+func (mi *MapInfo) ID() (MapID, bool) {
+ return mi.id, mi.id > 0
+}
+
+// programStats holds statistics of a program.
+type programStats struct {
+ // Total accumulated runtime of the program in ns.
+ runtime time.Duration
+ // Total number of times the program was called.
+ runCount uint64
+}
+
+// ProgramInfo describes a program.
+type ProgramInfo struct {
+ Type ProgramType
+ id ProgramID
+ // Truncated hash of the BPF bytecode. Available from 4.13.
+ Tag string
+ // Name as supplied by user space at load time. Available from 4.15.
+ Name string
+
+ createdByUID uint32
+ haveCreatedByUID bool
+ btf btf.ID
+ stats *programStats
+
+ maps []MapID
+ insns []byte
+
+ lineInfos []byte
+ numLineInfos uint32
+ funcInfos []byte
+ numFuncInfos uint32
+}
+
+func newProgramInfoFromFd(fd *sys.FD) (*ProgramInfo, error) {
+ var info sys.ProgInfo
+ err := sys.ObjInfo(fd, &info)
+ if errors.Is(err, syscall.EINVAL) {
+ return newProgramInfoFromProc(fd)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ pi := ProgramInfo{
+ Type: ProgramType(info.Type),
+ id: ProgramID(info.Id),
+ Tag: hex.EncodeToString(info.Tag[:]),
+ Name: unix.ByteSliceToString(info.Name[:]),
+ btf: btf.ID(info.BtfId),
+ stats: &programStats{
+ runtime: time.Duration(info.RunTimeNs),
+ runCount: info.RunCnt,
+ },
+ }
+
+ // Start with a clean struct for the second call, otherwise we may get EFAULT.
+ var info2 sys.ProgInfo
+
+ makeSecondCall := false
+
+ if info.NrMapIds > 0 {
+ pi.maps = make([]MapID, info.NrMapIds)
+ info2.NrMapIds = info.NrMapIds
+ info2.MapIds = sys.NewPointer(unsafe.Pointer(&pi.maps[0]))
+ makeSecondCall = true
+ } else if haveProgramInfoMapIDs() == nil {
+ // This program really has no associated maps.
+ pi.maps = make([]MapID, 0)
+ } else {
+ // The kernel doesn't report associated maps.
+ pi.maps = nil
+ }
+
+ // createdByUID and NrMapIds were introduced in the same kernel version.
+ if pi.maps != nil {
+ pi.createdByUID = info.CreatedByUid
+ pi.haveCreatedByUID = true
+ }
+
+ if info.XlatedProgLen > 0 {
+ pi.insns = make([]byte, info.XlatedProgLen)
+ info2.XlatedProgLen = info.XlatedProgLen
+ info2.XlatedProgInsns = sys.NewSlicePointer(pi.insns)
+ makeSecondCall = true
+ }
+
+ if info.NrLineInfo > 0 {
+ pi.lineInfos = make([]byte, btf.LineInfoSize*info.NrLineInfo)
+ info2.LineInfo = sys.NewSlicePointer(pi.lineInfos)
+ info2.LineInfoRecSize = btf.LineInfoSize
+ info2.NrLineInfo = info.NrLineInfo
+ pi.numLineInfos = info.NrLineInfo
+ makeSecondCall = true
+ }
+
+ if info.NrFuncInfo > 0 {
+ pi.funcInfos = make([]byte, btf.FuncInfoSize*info.NrFuncInfo)
+ info2.FuncInfo = sys.NewSlicePointer(pi.funcInfos)
+ info2.FuncInfoRecSize = btf.FuncInfoSize
+ info2.NrFuncInfo = info.NrFuncInfo
+ pi.numFuncInfos = info.NrFuncInfo
+ makeSecondCall = true
+ }
+
+ if makeSecondCall {
+ if err := sys.ObjInfo(fd, &info2); err != nil {
+ return nil, err
+ }
+ }
+
+ return &pi, nil
+}
+
+func newProgramInfoFromProc(fd *sys.FD) (*ProgramInfo, error) {
+ var info ProgramInfo
+ err := scanFdInfo(fd, map[string]interface{}{
+ "prog_type": &info.Type,
+ "prog_tag": &info.Tag,
+ })
+ if errors.Is(err, errMissingFields) {
+ return nil, &internal.UnsupportedFeatureError{
+ Name: "reading program info from /proc/self/fdinfo",
+ MinimumVersion: internal.Version{4, 10, 0},
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ return &info, nil
+}
+
+// ID returns the program ID.
+//
+// Available from 4.13.
+//
+// The bool return value indicates whether this optional field is available.
+func (pi *ProgramInfo) ID() (ProgramID, bool) {
+ return pi.id, pi.id > 0
+}
+
+// CreatedByUID returns the Uid that created the program.
+//
+// Available from 4.15.
+//
+// The bool return value indicates whether this optional field is available.
+func (pi *ProgramInfo) CreatedByUID() (uint32, bool) {
+ return pi.createdByUID, pi.haveCreatedByUID
+}
+
+// BTFID returns the BTF ID associated with the program.
+//
+// The ID is only valid as long as the associated program is kept alive.
+// Available from 5.0.
+//
+// The bool return value indicates whether this optional field is available and
+// populated. (The field may be available but not populated if the kernel
+// supports the field but the program was loaded without BTF information.)
+func (pi *ProgramInfo) BTFID() (btf.ID, bool) {
+ return pi.btf, pi.btf > 0
+}
+
+// RunCount returns the total number of times the program was called.
+//
+// Can return 0 if the collection of statistics is not enabled. See EnableStats().
+// The bool return value indicates whether this optional field is available.
+func (pi *ProgramInfo) RunCount() (uint64, bool) {
+ if pi.stats != nil {
+ return pi.stats.runCount, true
+ }
+ return 0, false
+}
+
+// Runtime returns the total accumulated runtime of the program.
+//
+// Can return 0 if the collection of statistics is not enabled. See EnableStats().
+// The bool return value indicates whether this optional field is available.
+func (pi *ProgramInfo) Runtime() (time.Duration, bool) {
+ if pi.stats != nil {
+ return pi.stats.runtime, true
+ }
+ return time.Duration(0), false
+}
+
+// Instructions returns the 'xlated' instruction stream of the program
+// after it has been verified and rewritten by the kernel. These instructions
+// cannot be loaded back into the kernel as-is, this is mainly used for
+// inspecting loaded programs for troubleshooting, dumping, etc.
+//
+// For example, map accesses are made to reference their kernel map IDs,
+// not the FDs they had when the program was inserted. Note that before
+// the introduction of bpf_insn_prepare_dump in kernel 4.16, xlated
+// instructions were not sanitized, making the output even less reusable
+// and less likely to round-trip or evaluate to the same program Tag.
+//
+// The first instruction is marked as a symbol using the Program's name.
+//
+// If available, the instructions will be annotated with metadata from the
+// BTF. This includes line information and function information. Reading
+// this metadata requires CAP_SYS_ADMIN or equivalent. If capability is
+// unavailable, the instructions will be returned without metadata.
+//
+// Available from 4.13. Requires CAP_BPF or equivalent for plain instructions.
+// Requires CAP_SYS_ADMIN for instructions with metadata.
+func (pi *ProgramInfo) Instructions() (asm.Instructions, error) {
+ // If the calling process is not BPF-capable or if the kernel doesn't
+ // support getting xlated instructions, the field will be zero.
+ if len(pi.insns) == 0 {
+ return nil, fmt.Errorf("insufficient permissions or unsupported kernel: %w", ErrNotSupported)
+ }
+
+ r := bytes.NewReader(pi.insns)
+ var insns asm.Instructions
+ if err := insns.Unmarshal(r, internal.NativeEndian); err != nil {
+ return nil, fmt.Errorf("unmarshaling instructions: %w", err)
+ }
+
+ if pi.btf != 0 {
+ btfh, err := btf.NewHandleFromID(pi.btf)
+ if err != nil {
+ // Getting a BTF handle requires CAP_SYS_ADMIN, if not available we get an -EPERM.
+ // Ignore it and fall back to instructions without metadata.
+ if !errors.Is(err, unix.EPERM) {
+ return nil, fmt.Errorf("unable to get BTF handle: %w", err)
+ }
+ }
+
+ // If we have a BTF handle, we can use it to assign metadata to the instructions.
+ if btfh != nil {
+ defer btfh.Close()
+
+ spec, err := btfh.Spec(nil)
+ if err != nil {
+ return nil, fmt.Errorf("unable to get BTF spec: %w", err)
+ }
+
+ lineInfos, err := btf.LoadLineInfos(
+ bytes.NewReader(pi.lineInfos),
+ internal.NativeEndian,
+ pi.numLineInfos,
+ spec,
+ )
+ if err != nil {
+ return nil, fmt.Errorf("parse line info: %w", err)
+ }
+
+ funcInfos, err := btf.LoadFuncInfos(
+ bytes.NewReader(pi.funcInfos),
+ internal.NativeEndian,
+ pi.numFuncInfos,
+ spec,
+ )
+ if err != nil {
+ return nil, fmt.Errorf("parse func info: %w", err)
+ }
+
+ btf.AssignMetadataToInstructions(insns, funcInfos, lineInfos, btf.CORERelocationInfos{})
+ }
+ }
+
+ fn := btf.FuncMetadata(&insns[0])
+ name := pi.Name
+ if fn != nil {
+ name = fn.Name
+ }
+ insns[0] = insns[0].WithSymbol(name)
+
+ return insns, nil
+}
+
+// MapIDs returns the maps related to the program.
+//
+// Available from 4.15.
+//
+// The bool return value indicates whether this optional field is available.
+func (pi *ProgramInfo) MapIDs() ([]MapID, bool) {
+ return pi.maps, pi.maps != nil
+}
+
+func scanFdInfo(fd *sys.FD, fields map[string]interface{}) error {
+ fh, err := os.Open(fmt.Sprintf("/proc/self/fdinfo/%d", fd.Int()))
+ if err != nil {
+ return err
+ }
+ defer fh.Close()
+
+ if err := scanFdInfoReader(fh, fields); err != nil {
+ return fmt.Errorf("%s: %w", fh.Name(), err)
+ }
+ return nil
+}
+
+var errMissingFields = errors.New("missing fields")
+
+func scanFdInfoReader(r io.Reader, fields map[string]interface{}) error {
+ var (
+ scanner = bufio.NewScanner(r)
+ scanned int
+ )
+
+ for scanner.Scan() {
+ parts := strings.SplitN(scanner.Text(), "\t", 2)
+ if len(parts) != 2 {
+ continue
+ }
+
+ name := strings.TrimSuffix(parts[0], ":")
+ field, ok := fields[string(name)]
+ if !ok {
+ continue
+ }
+
+ if n, err := fmt.Sscanln(parts[1], field); err != nil || n != 1 {
+ return fmt.Errorf("can't parse field %s: %v", name, err)
+ }
+
+ scanned++
+ }
+
+ if err := scanner.Err(); err != nil {
+ return err
+ }
+
+ if len(fields) > 0 && scanned == 0 {
+ return ErrNotSupported
+ }
+
+ if scanned != len(fields) {
+ return errMissingFields
+ }
+
+ return nil
+}
+
+// EnableStats starts the measuring of the runtime
+// and run counts of eBPF programs.
+//
+// Collecting statistics can have an impact on the performance.
+//
+// Requires at least 5.8.
+func EnableStats(which uint32) (io.Closer, error) {
+ fd, err := sys.EnableStats(&sys.EnableStatsAttr{
+ Type: which,
+ })
+ if err != nil {
+ return nil, err
+ }
+ return fd, nil
+}
+
+var haveProgramInfoMapIDs = internal.NewFeatureTest("map IDs in program info", "4.15", func() error {
+ prog, err := progLoad(asm.Instructions{
+ asm.LoadImm(asm.R0, 0, asm.DWord),
+ asm.Return(),
+ }, SocketFilter, "MIT")
+ if err != nil {
+ return err
+ }
+ defer prog.Close()
+
+ err = sys.ObjInfo(prog, &sys.ProgInfo{
+ // NB: Don't need to allocate MapIds since the program isn't using
+ // any maps.
+ NrMapIds: 1,
+ })
+ if errors.Is(err, unix.EINVAL) {
+ // Most likely the syscall doesn't exist.
+ return internal.ErrNotSupported
+ }
+ if errors.Is(err, unix.E2BIG) {
+ // We've hit check_uarg_tail_zero on older kernels.
+ return internal.ErrNotSupported
+ }
+
+ return err
+})
diff --git a/vendor/github.com/cilium/ebpf/internal/align.go b/vendor/github.com/cilium/ebpf/internal/align.go
new file mode 100644
index 000000000..edc898fa9
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/align.go
@@ -0,0 +1,8 @@
+package internal
+
+import "golang.org/x/exp/constraints"
+
+// Align returns 'n' updated to 'alignment' boundary.
+func Align[I constraints.Integer](n, alignment I) I {
+ return (n + alignment - 1) / alignment * alignment
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/buffer.go b/vendor/github.com/cilium/ebpf/internal/buffer.go
new file mode 100644
index 000000000..81c654433
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/buffer.go
@@ -0,0 +1,31 @@
+package internal
+
+import (
+ "bytes"
+ "sync"
+)
+
+var bytesBufferPool = sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Buffer)
+ },
+}
+
+ // NewBuffer retrieves a [bytes.Buffer] from a pool and re-initialises it.
+//
+// The returned buffer should be passed to [PutBuffer].
+func NewBuffer(buf []byte) *bytes.Buffer {
+ wr := bytesBufferPool.Get().(*bytes.Buffer)
+ // Reinitialize the Buffer with a new backing slice since it is returned to
+ // the caller by wr.Bytes() below. Pooling is faster despite calling
+ // NewBuffer. The pooled alloc is still reused, it only needs to be zeroed.
+ *wr = *bytes.NewBuffer(buf)
+ return wr
+}
+
+// PutBuffer releases a buffer to the pool.
+func PutBuffer(buf *bytes.Buffer) {
+ // Release reference to the backing buffer.
+ *buf = *bytes.NewBuffer(nil)
+ bytesBufferPool.Put(buf)
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/cpu.go b/vendor/github.com/cilium/ebpf/internal/cpu.go
new file mode 100644
index 000000000..9e908b610
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/cpu.go
@@ -0,0 +1,51 @@
+package internal
+
+import (
+ "fmt"
+ "os"
+ "strings"
+)
+
+ // PossibleCPUs returns the max number of CPUs a system may possibly have.
+ // Logical CPU numbers must be of the form 0-n.
+var PossibleCPUs = Memoize(func() (int, error) {
+ return parseCPUsFromFile("/sys/devices/system/cpu/possible")
+})
+
+func parseCPUsFromFile(path string) (int, error) {
+ spec, err := os.ReadFile(path)
+ if err != nil {
+ return 0, err
+ }
+
+ n, err := parseCPUs(string(spec))
+ if err != nil {
+ return 0, fmt.Errorf("can't parse %s: %v", path, err)
+ }
+
+ return n, nil
+}
+
+// parseCPUs parses the number of cpus from a string produced
+// by bitmap_list_string() in the Linux kernel.
+// Multiple ranges are rejected, since they can't be unified
+// into a single number.
+// This is the format of /sys/devices/system/cpu/possible, it
+// is not suitable for /sys/devices/system/cpu/online, etc.
+func parseCPUs(spec string) (int, error) {
+ if strings.Trim(spec, "\n") == "0" {
+ return 1, nil
+ }
+
+ var low, high int
+ n, err := fmt.Sscanf(spec, "%d-%d\n", &low, &high)
+ if n != 2 || err != nil {
+ return 0, fmt.Errorf("invalid format: %s", spec)
+ }
+ if low != 0 {
+ return 0, fmt.Errorf("CPU spec doesn't start at zero: %s", spec)
+ }
+
+ // cpus is 0 indexed
+ return high + 1, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/deque.go b/vendor/github.com/cilium/ebpf/internal/deque.go
new file mode 100644
index 000000000..e3a305021
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/deque.go
@@ -0,0 +1,91 @@
+package internal
+
+import "math/bits"
+
+// Deque implements a double ended queue.
+type Deque[T any] struct {
+ elems []T
+ read, write uint64
+ mask uint64
+}
+
+// Reset clears the contents of the deque while retaining the backing buffer.
+func (dq *Deque[T]) Reset() {
+ var zero T
+
+ for i := dq.read; i < dq.write; i++ {
+ dq.elems[i&dq.mask] = zero
+ }
+
+ dq.read, dq.write = 0, 0
+}
+
+func (dq *Deque[T]) Empty() bool {
+ return dq.read == dq.write
+}
+
+// Push adds an element to the end.
+func (dq *Deque[T]) Push(e T) {
+ dq.Grow(1)
+ dq.elems[dq.write&dq.mask] = e
+ dq.write++
+}
+
+// Shift returns the first element or the zero value.
+func (dq *Deque[T]) Shift() T {
+ var zero T
+
+ if dq.Empty() {
+ return zero
+ }
+
+ index := dq.read & dq.mask
+ t := dq.elems[index]
+ dq.elems[index] = zero
+ dq.read++
+ return t
+}
+
+// Pop returns the last element or the zero value.
+func (dq *Deque[T]) Pop() T {
+ var zero T
+
+ if dq.Empty() {
+ return zero
+ }
+
+ dq.write--
+ index := dq.write & dq.mask
+ t := dq.elems[index]
+ dq.elems[index] = zero
+ return t
+}
+
+// Grow the deque's capacity, if necessary, to guarantee space for another n
+// elements.
+func (dq *Deque[T]) Grow(n int) {
+ have := dq.write - dq.read
+ need := have + uint64(n)
+ if need < have {
+ panic("overflow")
+ }
+ if uint64(len(dq.elems)) >= need {
+ return
+ }
+
+ // Round up to the new power of two which is at least 8.
+ // See https://jameshfisher.com/2018/03/30/round-up-power-2/
+ capacity := 1 << (64 - bits.LeadingZeros64(need-1))
+ if capacity < 8 {
+ capacity = 8
+ }
+
+ elems := make([]T, have, capacity)
+ pivot := dq.read & dq.mask
+ copied := copy(elems, dq.elems[pivot:])
+ copy(elems[copied:], dq.elems[:pivot])
+
+ dq.elems = elems[:capacity]
+ dq.mask = uint64(capacity) - 1
+ dq.read, dq.write = 0, have
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/elf.go b/vendor/github.com/cilium/ebpf/internal/elf.go
new file mode 100644
index 000000000..011581938
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/elf.go
@@ -0,0 +1,102 @@
+package internal
+
+import (
+ "debug/elf"
+ "fmt"
+ "io"
+)
+
+type SafeELFFile struct {
+ *elf.File
+}
+
+// NewSafeELFFile reads an ELF safely.
+//
+// Any panic during parsing is turned into an error. This is necessary since
+// there are a bunch of unfixed bugs in debug/elf.
+//
+// https://github.com/golang/go/issues?q=is%3Aissue+is%3Aopen+debug%2Felf+in%3Atitle
+func NewSafeELFFile(r io.ReaderAt) (safe *SafeELFFile, err error) {
+ defer func() {
+ r := recover()
+ if r == nil {
+ return
+ }
+
+ safe = nil
+ err = fmt.Errorf("reading ELF file panicked: %s", r)
+ }()
+
+ file, err := elf.NewFile(r)
+ if err != nil {
+ return nil, err
+ }
+
+ return &SafeELFFile{file}, nil
+}
+
+// OpenSafeELFFile reads an ELF from a file.
+//
+// It works like NewSafeELFFile, with the exception that safe.Close will
+// close the underlying file.
+func OpenSafeELFFile(path string) (safe *SafeELFFile, err error) {
+ defer func() {
+ r := recover()
+ if r == nil {
+ return
+ }
+
+ safe = nil
+ err = fmt.Errorf("reading ELF file panicked: %s", r)
+ }()
+
+ file, err := elf.Open(path)
+ if err != nil {
+ return nil, err
+ }
+
+ return &SafeELFFile{file}, nil
+}
+
+// Symbols is the safe version of elf.File.Symbols.
+func (se *SafeELFFile) Symbols() (syms []elf.Symbol, err error) {
+ defer func() {
+ r := recover()
+ if r == nil {
+ return
+ }
+
+ syms = nil
+ err = fmt.Errorf("reading ELF symbols panicked: %s", r)
+ }()
+
+ syms, err = se.File.Symbols()
+ return
+}
+
+// DynamicSymbols is the safe version of elf.File.DynamicSymbols.
+func (se *SafeELFFile) DynamicSymbols() (syms []elf.Symbol, err error) {
+ defer func() {
+ r := recover()
+ if r == nil {
+ return
+ }
+
+ syms = nil
+ err = fmt.Errorf("reading ELF dynamic symbols panicked: %s", r)
+ }()
+
+ syms, err = se.File.DynamicSymbols()
+ return
+}
+
+// SectionsByType returns all sections in the file with the specified section type.
+func (se *SafeELFFile) SectionsByType(typ elf.SectionType) []*elf.Section {
+ sections := make([]*elf.Section, 0, 1)
+ for _, section := range se.Sections {
+ if section.Type == typ {
+ sections = append(sections, section)
+ }
+ }
+ return sections
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/endian_be.go b/vendor/github.com/cilium/ebpf/internal/endian_be.go
new file mode 100644
index 000000000..39f49ba3a
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/endian_be.go
@@ -0,0 +1,12 @@
+//go:build armbe || arm64be || mips || mips64 || mips64p32 || ppc64 || s390 || s390x || sparc || sparc64
+
+package internal
+
+import "encoding/binary"
+
+// NativeEndian is set to either binary.BigEndian or binary.LittleEndian,
+// depending on the host's endianness.
+var NativeEndian = binary.BigEndian
+
+// ClangEndian is set to either "el" or "eb" depending on the host's endianness.
+const ClangEndian = "eb"
diff --git a/vendor/github.com/cilium/ebpf/internal/endian_le.go b/vendor/github.com/cilium/ebpf/internal/endian_le.go
new file mode 100644
index 000000000..9488e301b
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/endian_le.go
@@ -0,0 +1,12 @@
+//go:build 386 || amd64 || amd64p32 || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || ppc64le || riscv64
+
+package internal
+
+import "encoding/binary"
+
+// NativeEndian is set to either binary.BigEndian or binary.LittleEndian,
+// depending on the host's endianness.
+var NativeEndian = binary.LittleEndian
+
+// ClangEndian is set to either "el" or "eb" depending on the host's endianness.
+const ClangEndian = "el"
diff --git a/vendor/github.com/cilium/ebpf/internal/errors.go b/vendor/github.com/cilium/ebpf/internal/errors.go
new file mode 100644
index 000000000..bda01e2fd
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/errors.go
@@ -0,0 +1,198 @@
+package internal
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+)
+
+// ErrorWithLog wraps err in a VerifierError that includes the parsed verifier
+// log buffer.
+//
+// The default error output is a summary of the full log. The latter can be
+// accessed via VerifierError.Log or by formatting the error, see Format.
+func ErrorWithLog(source string, err error, log []byte, truncated bool) *VerifierError {
+ const whitespace = "\t\r\v\n "
+
+ // Convert verifier log C string by truncating it on the first 0 byte
+ // and trimming trailing whitespace before interpreting as a Go string.
+ if i := bytes.IndexByte(log, 0); i != -1 {
+ log = log[:i]
+ }
+
+ log = bytes.Trim(log, whitespace)
+ if len(log) == 0 {
+ return &VerifierError{source, err, nil, truncated}
+ }
+
+ logLines := bytes.Split(log, []byte{'\n'})
+ lines := make([]string, 0, len(logLines))
+ for _, line := range logLines {
+ // Don't remove leading white space on individual lines. We rely on it
+ // when outputting logs.
+ lines = append(lines, string(bytes.TrimRight(line, whitespace)))
+ }
+
+ return &VerifierError{source, err, lines, truncated}
+}
+
+// VerifierError includes information from the eBPF verifier.
+//
+// It summarises the log output, see Format if you want to output the full contents.
+type VerifierError struct {
+ source string
+ // The error which caused this error.
+ Cause error
+ // The verifier output split into lines.
+ Log []string
+ // Whether the log output is truncated, based on several heuristics.
+ Truncated bool
+}
+
+func (le *VerifierError) Unwrap() error {
+ return le.Cause
+}
+
+func (le *VerifierError) Error() string {
+ log := le.Log
+ if n := len(log); n > 0 && strings.HasPrefix(log[n-1], "processed ") {
+ // Get rid of "processed 39 insns (limit 1000000) ..." from summary.
+ log = log[:n-1]
+ }
+
+ var b strings.Builder
+ fmt.Fprintf(&b, "%s: %s", le.source, le.Cause.Error())
+
+ n := len(log)
+ if n == 0 {
+ return b.String()
+ }
+
+ lines := log[n-1:]
+ if n >= 2 && (includePreviousLine(log[n-1]) || le.Truncated) {
+ // Add one more line of context if it aids understanding the error.
+ lines = log[n-2:]
+ }
+
+ for _, line := range lines {
+ b.WriteString(": ")
+ b.WriteString(strings.TrimSpace(line))
+ }
+
+ omitted := len(le.Log) - len(lines)
+ if omitted == 0 && !le.Truncated {
+ return b.String()
+ }
+
+ b.WriteString(" (")
+ if le.Truncated {
+ b.WriteString("truncated")
+ }
+
+ if omitted > 0 {
+ if le.Truncated {
+ b.WriteString(", ")
+ }
+ fmt.Fprintf(&b, "%d line(s) omitted", omitted)
+ }
+ b.WriteString(")")
+
+ return b.String()
+}
+
+// includePreviousLine returns true if the given line likely is better
+// understood with additional context from the preceding line.
+func includePreviousLine(line string) bool {
+ // We need to find a good trade off between understandable error messages
+ // and too much complexity here. Checking the string prefix is ok, requiring
+ // regular expressions to do it is probably overkill.
+
+ if strings.HasPrefix(line, "\t") {
+ // [13] STRUCT drm_rect size=16 vlen=4
+ // \tx1 type_id=2
+ return true
+ }
+
+ if len(line) >= 2 && line[0] == 'R' && line[1] >= '0' && line[1] <= '9' {
+ // 0: (95) exit
+ // R0 !read_ok
+ return true
+ }
+
+ if strings.HasPrefix(line, "invalid bpf_context access") {
+ // 0: (79) r6 = *(u64 *)(r1 +0)
+ // func '__x64_sys_recvfrom' arg0 type FWD is not a struct
+ // invalid bpf_context access off=0 size=8
+ return true
+ }
+
+ return false
+}
+
+// Format the error.
+//
+// Understood verbs are %s and %v, which are equivalent to calling Error(). %v
+// allows outputting additional information using the following flags:
+//
+// %+v: Output the first lines, or all lines if no width is given.
+// %-v: Output the last lines, or all lines if no width is given.
+//
+// Use width to specify how many lines to output. Use the '-' flag to output
+// lines from the end of the log instead of the beginning.
+func (le *VerifierError) Format(f fmt.State, verb rune) {
+ switch verb {
+ case 's':
+ _, _ = io.WriteString(f, le.Error())
+
+ case 'v':
+ n, haveWidth := f.Width()
+ if !haveWidth || n > len(le.Log) {
+ n = len(le.Log)
+ }
+
+ if !f.Flag('+') && !f.Flag('-') {
+ if haveWidth {
+ _, _ = io.WriteString(f, "%!v(BADWIDTH)")
+ return
+ }
+
+ _, _ = io.WriteString(f, le.Error())
+ return
+ }
+
+ if f.Flag('+') && f.Flag('-') {
+ _, _ = io.WriteString(f, "%!v(BADFLAG)")
+ return
+ }
+
+ fmt.Fprintf(f, "%s: %s:", le.source, le.Cause.Error())
+
+ omitted := len(le.Log) - n
+ lines := le.Log[:n]
+ if f.Flag('-') {
+ // Print last instead of first lines.
+ lines = le.Log[len(le.Log)-n:]
+ if omitted > 0 {
+ fmt.Fprintf(f, "\n\t(%d line(s) omitted)", omitted)
+ }
+ }
+
+ for _, line := range lines {
+ fmt.Fprintf(f, "\n\t%s", line)
+ }
+
+ if !f.Flag('-') {
+ if omitted > 0 {
+ fmt.Fprintf(f, "\n\t(%d line(s) omitted)", omitted)
+ }
+ }
+
+ if le.Truncated {
+ fmt.Fprintf(f, "\n\t(truncated)")
+ }
+
+ default:
+ fmt.Fprintf(f, "%%!%c(BADVERB)", verb)
+ }
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/feature.go b/vendor/github.com/cilium/ebpf/internal/feature.go
new file mode 100644
index 000000000..b1f650751
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/feature.go
@@ -0,0 +1,184 @@
+package internal
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+)
+
+// ErrNotSupported indicates that a feature is not supported by the current kernel.
+var ErrNotSupported = errors.New("not supported")
+
+// UnsupportedFeatureError is returned by FeatureTest() functions.
+type UnsupportedFeatureError struct {
+ // The minimum Linux mainline version required for this feature.
+ // Used for the error string, and for sanity checking during testing.
+ MinimumVersion Version
+
+ // The name of the feature that isn't supported.
+ Name string
+}
+
+func (ufe *UnsupportedFeatureError) Error() string {
+ if ufe.MinimumVersion.Unspecified() {
+ return fmt.Sprintf("%s not supported", ufe.Name)
+ }
+ return fmt.Sprintf("%s not supported (requires >= %s)", ufe.Name, ufe.MinimumVersion)
+}
+
+// Is indicates that UnsupportedFeatureError is ErrNotSupported.
+func (ufe *UnsupportedFeatureError) Is(target error) bool {
+ return target == ErrNotSupported
+}
+
+// FeatureTest caches the result of a [FeatureTestFn].
+//
+// Fields should not be modified after creation.
+type FeatureTest struct {
+ // The name of the feature being detected.
+ Name string
+ // Version in the form Major.Minor[.Patch].
+ Version string
+ // The feature test itself.
+ Fn FeatureTestFn
+
+ mu sync.RWMutex
+ done bool
+ result error
+}
+
+// FeatureTestFn is used to determine whether the kernel supports
+// a certain feature.
+//
+// The return values have the following semantics:
+//
+// err == ErrNotSupported: the feature is not available
+// err == nil: the feature is available
+// err != nil: the test couldn't be executed
+type FeatureTestFn func() error
+
+// NewFeatureTest is a convenient way to create a single [FeatureTest].
+func NewFeatureTest(name, version string, fn FeatureTestFn) func() error {
+ ft := &FeatureTest{
+ Name: name,
+ Version: version,
+ Fn: fn,
+ }
+
+ return ft.execute
+}
+
+// execute the feature test.
+//
+// The result is cached if the test is conclusive.
+//
+// See [FeatureTestFn] for the meaning of the returned error.
+func (ft *FeatureTest) execute() error {
+ ft.mu.RLock()
+ result, done := ft.result, ft.done
+ ft.mu.RUnlock()
+
+ if done {
+ return result
+ }
+
+ ft.mu.Lock()
+ defer ft.mu.Unlock()
+
+ // The test may have been executed by another caller while we were
+ // waiting to acquire ft.mu.
+ if ft.done {
+ return ft.result
+ }
+
+ err := ft.Fn()
+ if err == nil {
+ ft.done = true
+ return nil
+ }
+
+ if errors.Is(err, ErrNotSupported) {
+ var v Version
+ if ft.Version != "" {
+ v, err = NewVersion(ft.Version)
+ if err != nil {
+ return fmt.Errorf("feature %s: %w", ft.Name, err)
+ }
+ }
+
+ ft.done = true
+ ft.result = &UnsupportedFeatureError{
+ MinimumVersion: v,
+ Name: ft.Name,
+ }
+
+ return ft.result
+ }
+
+ // We couldn't execute the feature test to a point
+ // where it could make a determination.
+ // Don't cache the result, just return it.
+ return fmt.Errorf("detect support for %s: %w", ft.Name, err)
+}
+
+// FeatureMatrix groups multiple related feature tests into a map.
+//
+// Useful when there is a small number of discrete features which are known
+// at compile time.
+//
+// It must not be modified concurrently with calling [FeatureMatrix.Result].
+type FeatureMatrix[K comparable] map[K]*FeatureTest
+
+// Result returns the outcome of the feature test for the given key.
+//
+// It's safe to call this function concurrently.
+func (fm FeatureMatrix[K]) Result(key K) error {
+ ft, ok := fm[key]
+ if !ok {
+ return fmt.Errorf("no feature probe for %v", key)
+ }
+
+ return ft.execute()
+}
+
+// FeatureCache caches a potentially unlimited number of feature probes.
+//
+// Useful when there is a high cardinality for a feature test.
+type FeatureCache[K comparable] struct {
+ mu sync.RWMutex
+ newTest func(K) *FeatureTest
+ features map[K]*FeatureTest
+}
+
+func NewFeatureCache[K comparable](newTest func(K) *FeatureTest) *FeatureCache[K] {
+ return &FeatureCache[K]{
+ newTest: newTest,
+ features: make(map[K]*FeatureTest),
+ }
+}
+
+func (fc *FeatureCache[K]) Result(key K) error {
+ // NB: Executing the feature test happens without fc.mu taken.
+ return fc.retrieve(key).execute()
+}
+
+func (fc *FeatureCache[K]) retrieve(key K) *FeatureTest {
+ fc.mu.RLock()
+ ft := fc.features[key]
+ fc.mu.RUnlock()
+
+ if ft != nil {
+ return ft
+ }
+
+ fc.mu.Lock()
+ defer fc.mu.Unlock()
+
+ if ft := fc.features[key]; ft != nil {
+ return ft
+ }
+
+ ft = fc.newTest(key)
+ fc.features[key] = ft
+ return ft
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/io.go b/vendor/github.com/cilium/ebpf/internal/io.go
new file mode 100644
index 000000000..1eaf4775a
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/io.go
@@ -0,0 +1,128 @@
+package internal
+
+import (
+ "bufio"
+ "bytes"
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sync"
+)
+
+// NewBufferedSectionReader wraps an io.ReaderAt in an appropriately-sized
+// buffered reader. It is a convenience function for reading subsections of
+// ELF sections while minimizing the amount of read() syscalls made.
+//
+// Syscall overhead is non-negligible in continuous integration context
+// where ELFs might be accessed over virtual filesystems with poor random
+// access performance. Buffering reads makes sense because (sub)sections
+// end up being read completely anyway.
+//
+// Use instead of the r.Seek() + io.LimitReader() pattern.
+func NewBufferedSectionReader(ra io.ReaderAt, off, n int64) *bufio.Reader {
+ // Clamp the size of the buffer to one page to avoid slurping large parts
+ // of a file into memory. bufio.NewReader uses a hardcoded default buffer
+ // of 4096. Allow arches with larger pages to allocate more, but don't
+ // allocate a fixed 4k buffer if we only need to read a small segment.
+ buf := n
+ if ps := int64(os.Getpagesize()); n > ps {
+ buf = ps
+ }
+
+ return bufio.NewReaderSize(io.NewSectionReader(ra, off, n), int(buf))
+}
+
+// DiscardZeroes makes sure that all written bytes are zero
+// before discarding them.
+type DiscardZeroes struct{}
+
+func (DiscardZeroes) Write(p []byte) (int, error) {
+ for _, b := range p {
+ if b != 0 {
+ return 0, errors.New("encountered non-zero byte")
+ }
+ }
+ return len(p), nil
+}
+
+// ReadAllCompressed decompresses a gzipped file into memory.
+func ReadAllCompressed(file string) ([]byte, error) {
+ fh, err := os.Open(file)
+ if err != nil {
+ return nil, err
+ }
+ defer fh.Close()
+
+ gz, err := gzip.NewReader(fh)
+ if err != nil {
+ return nil, err
+ }
+ defer gz.Close()
+
+ return io.ReadAll(gz)
+}
+
+// ReadUint64FromFile reads a uint64 from a file.
+//
+// format specifies the contents of the file in fmt.Scanf syntax.
+func ReadUint64FromFile(format string, path ...string) (uint64, error) {
+ filename := filepath.Join(path...)
+ data, err := os.ReadFile(filename)
+ if err != nil {
+ return 0, fmt.Errorf("reading file %q: %w", filename, err)
+ }
+
+ var value uint64
+ n, err := fmt.Fscanf(bytes.NewReader(data), format, &value)
+ if err != nil {
+ return 0, fmt.Errorf("parsing file %q: %w", filename, err)
+ }
+ if n != 1 {
+ return 0, fmt.Errorf("parsing file %q: expected 1 item, got %d", filename, n)
+ }
+
+ return value, nil
+}
+
+type uint64FromFileKey struct {
+ format, path string
+}
+
+var uint64FromFileCache = struct {
+ sync.RWMutex
+ values map[uint64FromFileKey]uint64
+}{
+ values: map[uint64FromFileKey]uint64{},
+}
+
+// ReadUint64FromFileOnce is like ReadUint64FromFile but memoizes the result.
+func ReadUint64FromFileOnce(format string, path ...string) (uint64, error) {
+ filename := filepath.Join(path...)
+ key := uint64FromFileKey{format, filename}
+
+ uint64FromFileCache.RLock()
+ if value, ok := uint64FromFileCache.values[key]; ok {
+ uint64FromFileCache.RUnlock()
+ return value, nil
+ }
+ uint64FromFileCache.RUnlock()
+
+ value, err := ReadUint64FromFile(format, filename)
+ if err != nil {
+ return 0, err
+ }
+
+ uint64FromFileCache.Lock()
+ defer uint64FromFileCache.Unlock()
+
+ if value, ok := uint64FromFileCache.values[key]; ok {
+ // Someone else got here before us, use what is cached.
+ return value, nil
+ }
+
+ uint64FromFileCache.values[key] = value
+ return value, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go b/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go
new file mode 100644
index 000000000..d95e7eb0e
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go
@@ -0,0 +1,267 @@
+package kconfig
+
+import (
+ "bufio"
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/cilium/ebpf/btf"
+ "github.com/cilium/ebpf/internal"
+)
+
+// Find finds a kconfig file on the host.
+// It first reads from /boot/config- of the current running kernel and tries
+// /proc/config.gz if nothing was found in /boot.
+// If none of the file provide a kconfig, it returns an error.
+func Find() (*os.File, error) {
+ kernelRelease, err := internal.KernelRelease()
+ if err != nil {
+ return nil, fmt.Errorf("cannot get kernel release: %w", err)
+ }
+
+ path := "/boot/config-" + kernelRelease
+ f, err := os.Open(path)
+ if err == nil {
+ return f, nil
+ }
+
+ f, err = os.Open("/proc/config.gz")
+ if err == nil {
+ return f, nil
+ }
+
+ return nil, fmt.Errorf("neither %s nor /proc/config.gz provide a kconfig", path)
+}
+
+// Parse parses the kconfig file for which a reader is given.
+// All the CONFIG_* which are in filter and which are set will be
+// put in the returned map as key with their corresponding value as map value.
+// If filter is nil, no filtering will occur.
+// If the kconfig file is not valid, error will be returned.
+func Parse(source io.ReaderAt, filter map[string]struct{}) (map[string]string, error) {
+ var r io.Reader
+ zr, err := gzip.NewReader(io.NewSectionReader(source, 0, math.MaxInt64))
+ if err != nil {
+ r = io.NewSectionReader(source, 0, math.MaxInt64)
+ } else {
+ // Source is gzip compressed, transparently decompress.
+ r = zr
+ }
+
+ ret := make(map[string]string, len(filter))
+
+ s := bufio.NewScanner(r)
+
+ for s.Scan() {
+ line := s.Bytes()
+ err = processKconfigLine(line, ret, filter)
+ if err != nil {
+ return nil, fmt.Errorf("cannot parse line: %w", err)
+ }
+
+ if filter != nil && len(ret) == len(filter) {
+ break
+ }
+ }
+
+ if err := s.Err(); err != nil {
+ return nil, fmt.Errorf("cannot parse: %w", err)
+ }
+
+ if zr != nil {
+ return ret, zr.Close()
+ }
+
+ return ret, nil
+}
+
+// Golang translation of libbpf bpf_object__process_kconfig_line():
+// https://github.com/libbpf/libbpf/blob/fbd60dbff51c870f5e80a17c4f2fd639eb80af90/src/libbpf.c#L1874
+// It does the same checks but does not put the data inside the BPF map.
+func processKconfigLine(line []byte, m map[string]string, filter map[string]struct{}) error {
+ // Ignore empty lines and "# CONFIG_* is not set".
+ if !bytes.HasPrefix(line, []byte("CONFIG_")) {
+ return nil
+ }
+
+ key, value, found := bytes.Cut(line, []byte{'='})
+ if !found {
+ return fmt.Errorf("line %q does not contain separator '='", line)
+ }
+
+ if len(value) == 0 {
+ return fmt.Errorf("line %q has no value", line)
+ }
+
+ if filter != nil {
+ // NB: map[string(key)] gets special optimisation help from the compiler
+ // and doesn't allocate. Don't turn this into a variable.
+ _, ok := filter[string(key)]
+ if !ok {
+ return nil
+ }
+ }
+
+ // This can seem odd, but libbpf only sets the value the first time the key is
+ // met:
+ // https://github.com/torvalds/linux/blob/0d85b27b0cc6/tools/lib/bpf/libbpf.c#L1906-L1908
+ _, ok := m[string(key)]
+ if !ok {
+ m[string(key)] = string(value)
+ }
+
+ return nil
+}
+
+// PutValue translates the value given as parameter depending on the BTF
+// type, the translated value is then written to the byte array.
+func PutValue(data []byte, typ btf.Type, value string) error {
+ typ = btf.UnderlyingType(typ)
+
+ switch value {
+ case "y", "n", "m":
+ return putValueTri(data, typ, value)
+ default:
+ if strings.HasPrefix(value, `"`) {
+ return putValueString(data, typ, value)
+ }
+ return putValueNumber(data, typ, value)
+ }
+}
+
+// Golang translation of libbpf_tristate enum:
+// https://github.com/libbpf/libbpf/blob/fbd60dbff51c870f5e80a17c4f2fd639eb80af90/src/bpf_helpers.h#L169
+type triState int
+
+const (
+ TriNo triState = 0
+ TriYes triState = 1
+ TriModule triState = 2
+)
+
+func putValueTri(data []byte, typ btf.Type, value string) error {
+ switch v := typ.(type) {
+ case *btf.Int:
+ if v.Encoding != btf.Bool {
+ return fmt.Errorf("cannot add tri value, expected btf.Bool, got: %v", v.Encoding)
+ }
+
+ if v.Size != 1 {
+ return fmt.Errorf("cannot add tri value, expected size of 1 byte, got: %d", v.Size)
+ }
+
+ switch value {
+ case "y":
+ data[0] = 1
+ case "n":
+ data[0] = 0
+ default:
+ return fmt.Errorf("cannot use %q for btf.Bool", value)
+ }
+ case *btf.Enum:
+ if v.Name != "libbpf_tristate" {
+ return fmt.Errorf("cannot use enum %q, only libbpf_tristate is supported", v.Name)
+ }
+
+ var tri triState
+ switch value {
+ case "y":
+ tri = TriYes
+ case "m":
+ tri = TriModule
+ case "n":
+ tri = TriNo
+ default:
+ return fmt.Errorf("value %q is not support for libbpf_tristate", value)
+ }
+
+ internal.NativeEndian.PutUint64(data, uint64(tri))
+ default:
+ return fmt.Errorf("cannot add number value, expected btf.Int or btf.Enum, got: %T", v)
+ }
+
+ return nil
+}
+
+func putValueString(data []byte, typ btf.Type, value string) error {
+ array, ok := typ.(*btf.Array)
+ if !ok {
+ return fmt.Errorf("cannot add string value, expected btf.Array, got %T", array)
+ }
+
+ contentType, ok := btf.UnderlyingType(array.Type).(*btf.Int)
+ if !ok {
+ return fmt.Errorf("cannot add string value, expected array of btf.Int, got %T", contentType)
+ }
+
+ // Any Int, which is not bool, of one byte could be used to store char:
+ // https://github.com/torvalds/linux/blob/1a5304fecee5/tools/lib/bpf/libbpf.c#L3637-L3638
+ if contentType.Size != 1 && contentType.Encoding != btf.Bool {
+ return fmt.Errorf("cannot add string value, expected array of btf.Int of size 1, got array of btf.Int of size: %v", contentType.Size)
+ }
+
+ if !strings.HasPrefix(value, `"`) || !strings.HasSuffix(value, `"`) {
+ return fmt.Errorf(`value %q must start and finish with '"'`, value)
+ }
+
+ str := strings.Trim(value, `"`)
+
+ // We need to trim string if the bpf array is smaller.
+ if uint32(len(str)) >= array.Nelems {
+ str = str[:array.Nelems]
+ }
+
+ // Write the string content to .kconfig.
+ copy(data, str)
+
+ return nil
+}
+
+func putValueNumber(data []byte, typ btf.Type, value string) error {
+ integer, ok := typ.(*btf.Int)
+ if !ok {
+ return fmt.Errorf("cannot add number value, expected *btf.Int, got: %T", integer)
+ }
+
+ size := integer.Size
+ sizeInBits := size * 8
+
+ var n uint64
+ var err error
+ if integer.Encoding == btf.Signed {
+ parsed, e := strconv.ParseInt(value, 0, int(sizeInBits))
+
+ n = uint64(parsed)
+ err = e
+ } else {
+ parsed, e := strconv.ParseUint(value, 0, int(sizeInBits))
+
+ n = uint64(parsed)
+ err = e
+ }
+
+ if err != nil {
+ return fmt.Errorf("cannot parse value: %w", err)
+ }
+
+ switch size {
+ case 1:
+ data[0] = byte(n)
+ case 2:
+ internal.NativeEndian.PutUint16(data, uint16(n))
+ case 4:
+ internal.NativeEndian.PutUint32(data, uint32(n))
+ case 8:
+ internal.NativeEndian.PutUint64(data, uint64(n))
+ default:
+ return fmt.Errorf("size (%d) is not valid, expected: 1, 2, 4 or 8", size)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/memoize.go b/vendor/github.com/cilium/ebpf/internal/memoize.go
new file mode 100644
index 000000000..3de0a3fb9
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/memoize.go
@@ -0,0 +1,26 @@
+package internal
+
+import (
+ "sync"
+)
+
+type memoizedFunc[T any] struct {
+ once sync.Once
+ fn func() (T, error)
+ result T
+ err error
+}
+
+func (mf *memoizedFunc[T]) do() (T, error) {
+ mf.once.Do(func() {
+ mf.result, mf.err = mf.fn()
+ })
+ return mf.result, mf.err
+}
+
+// Memoize the result of a function call.
+//
+// fn is only ever called once, even if it returns an error.
+func Memoize[T any](fn func() (T, error)) func() (T, error) {
+ return (&memoizedFunc[T]{fn: fn}).do
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/output.go b/vendor/github.com/cilium/ebpf/internal/output.go
new file mode 100644
index 000000000..dd6e6cbaf
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/output.go
@@ -0,0 +1,97 @@
+package internal
+
+import (
+ "bytes"
+ "errors"
+ "go/format"
+ "go/scanner"
+ "io"
+ "reflect"
+ "strings"
+ "unicode"
+)
+
+// Identifier turns a C style type or field name into an exportable Go equivalent.
+func Identifier(str string) string {
+ prev := rune(-1)
+ return strings.Map(func(r rune) rune {
+ // See https://golang.org/ref/spec#Identifiers
+ switch {
+ case unicode.IsLetter(r):
+ if prev == -1 {
+ r = unicode.ToUpper(r)
+ }
+
+ case r == '_':
+ switch {
+ // The previous rune was deleted, or we are at the
+ // beginning of the string.
+ case prev == -1:
+ fallthrough
+
+ // The previous rune is a lower case letter or a digit.
+ case unicode.IsDigit(prev) || (unicode.IsLetter(prev) && unicode.IsLower(prev)):
+ // delete the current rune, and force the
+ // next character to be uppercased.
+ r = -1
+ }
+
+ case unicode.IsDigit(r):
+
+ default:
+ // Delete the current rune. prev is unchanged.
+ return -1
+ }
+
+ prev = r
+ return r
+ }, str)
+}
+
+// WriteFormatted outputs a formatted src into out.
+//
+// If formatting fails it returns an informative error message.
+func WriteFormatted(src []byte, out io.Writer) error {
+ formatted, err := format.Source(src)
+ if err == nil {
+ _, err = out.Write(formatted)
+ return err
+ }
+
+ var el scanner.ErrorList
+ if !errors.As(err, &el) {
+ return err
+ }
+
+ var nel scanner.ErrorList
+ for _, err := range el {
+ if !err.Pos.IsValid() {
+ nel = append(nel, err)
+ continue
+ }
+
+ buf := src[err.Pos.Offset:]
+ nl := bytes.IndexRune(buf, '\n')
+ if nl == -1 {
+ nel = append(nel, err)
+ continue
+ }
+
+ err.Msg += ": " + string(buf[:nl])
+ nel = append(nel, err)
+ }
+
+ return nel
+}
+
+// GoTypeName is like %T, but elides the package name.
+//
+// Pointers to a type are peeled off.
+func GoTypeName(t any) string {
+ rT := reflect.TypeOf(t)
+ for rT.Kind() == reflect.Pointer {
+ rT = rT.Elem()
+ }
+ // Doesn't return the correct Name for generic types due to https://github.com/golang/go/issues/55924
+ return rT.Name()
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/pinning.go b/vendor/github.com/cilium/ebpf/internal/pinning.go
new file mode 100644
index 000000000..01d892f93
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/pinning.go
@@ -0,0 +1,65 @@
+package internal
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+func Pin(currentPath, newPath string, fd *sys.FD) error {
+ if newPath == "" {
+ return errors.New("given pinning path cannot be empty")
+ }
+ if currentPath == newPath {
+ return nil
+ }
+
+ fsType, err := FSType(filepath.Dir(newPath))
+ if err != nil {
+ return err
+ }
+ if fsType != unix.BPF_FS_MAGIC {
+ return fmt.Errorf("%s is not on a bpf filesystem", newPath)
+ }
+
+ defer runtime.KeepAlive(fd)
+
+ if currentPath == "" {
+ return sys.ObjPin(&sys.ObjPinAttr{
+ Pathname: sys.NewStringPointer(newPath),
+ BpfFd: fd.Uint(),
+ })
+ }
+
+ // Renameat2 is used instead of os.Rename to disallow the new path replacing
+ // an existing path.
+ err = unix.Renameat2(unix.AT_FDCWD, currentPath, unix.AT_FDCWD, newPath, unix.RENAME_NOREPLACE)
+ if err == nil {
+ // Object is now moved to the new pinning path.
+ return nil
+ }
+ if !os.IsNotExist(err) {
+ return fmt.Errorf("unable to move pinned object to new path %v: %w", newPath, err)
+ }
+ // Internal state not in sync with the file system so let's fix it.
+ return sys.ObjPin(&sys.ObjPinAttr{
+ Pathname: sys.NewStringPointer(newPath),
+ BpfFd: fd.Uint(),
+ })
+}
+
+func Unpin(pinnedPath string) error {
+ if pinnedPath == "" {
+ return nil
+ }
+ err := os.Remove(pinnedPath)
+ if err == nil || os.IsNotExist(err) {
+ return nil
+ }
+ return err
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/platform.go b/vendor/github.com/cilium/ebpf/internal/platform.go
new file mode 100644
index 000000000..6e90f2ef7
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/platform.go
@@ -0,0 +1,43 @@
+package internal
+
+import (
+ "runtime"
+)
+
+// PlatformPrefix returns the platform-dependent syscall wrapper prefix used by
+// the linux kernel.
+//
+// Based on https://github.com/golang/go/blob/master/src/go/build/syslist.go
+// and https://github.com/libbpf/libbpf/blob/master/src/libbpf.c#L10047
+func PlatformPrefix() string {
+ switch runtime.GOARCH {
+ case "386":
+ return "__ia32_"
+ case "amd64", "amd64p32":
+ return "__x64_"
+
+ case "arm", "armbe":
+ return "__arm_"
+ case "arm64", "arm64be":
+ return "__arm64_"
+
+ case "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le":
+ return "__mips_"
+
+ case "s390":
+ return "__s390_"
+ case "s390x":
+ return "__s390x_"
+
+ case "riscv", "riscv64":
+ return "__riscv_"
+
+ case "ppc":
+ return "__powerpc_"
+ case "ppc64", "ppc64le":
+ return "__powerpc64_"
+
+ default:
+ return ""
+ }
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/prog.go b/vendor/github.com/cilium/ebpf/internal/prog.go
new file mode 100644
index 000000000..d629145b6
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/prog.go
@@ -0,0 +1,11 @@
+package internal
+
+// EmptyBPFContext is the smallest-possible BPF input context to be used for
+// invoking `Program.{Run,Benchmark,Test}`.
+//
+// Programs require a context input buffer of at least 15 bytes. Looking in
+// net/bpf/test_run.c, bpf_test_init() requires that the input is at least
+// ETH_HLEN (14) bytes. As of Linux commit fd18942 ("bpf: Don't redirect packets
+// with invalid pkt_len"), it also requires the skb to be non-empty after
+// removing the Layer 2 header.
+var EmptyBPFContext = make([]byte, 15)
diff --git a/vendor/github.com/cilium/ebpf/internal/statfs.go b/vendor/github.com/cilium/ebpf/internal/statfs.go
new file mode 100644
index 000000000..44c02d676
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/statfs.go
@@ -0,0 +1,23 @@
+package internal
+
+import (
+ "unsafe"
+
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+func FSType(path string) (int64, error) {
+ var statfs unix.Statfs_t
+ if err := unix.Statfs(path, &statfs); err != nil {
+ return 0, err
+ }
+
+ fsType := int64(statfs.Type)
+ if unsafe.Sizeof(statfs.Type) == 4 {
+ // We're on a 32 bit arch, where statfs.Type is int32. bpfFSType is a
+ // negative number when interpreted as int32 so we need to cast via
+ // uint32 to avoid sign extension.
+ fsType = int64(uint32(statfs.Type))
+ }
+ return fsType, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/doc.go b/vendor/github.com/cilium/ebpf/internal/sys/doc.go
new file mode 100644
index 000000000..dfe174448
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sys/doc.go
@@ -0,0 +1,6 @@
+// Package sys contains bindings for the BPF syscall.
+package sys
+
+// Regenerate types.go by invoking go generate in the current directory.
+
+//go:generate go run github.com/cilium/ebpf/internal/cmd/gentypes ../../btf/testdata/vmlinux.btf.gz
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/fd.go b/vendor/github.com/cilium/ebpf/internal/sys/fd.go
new file mode 100644
index 000000000..941a56fb9
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sys/fd.go
@@ -0,0 +1,133 @@
+package sys
+
+import (
+ "fmt"
+ "math"
+ "os"
+ "runtime"
+ "strconv"
+
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+var ErrClosedFd = unix.EBADF
+
+type FD struct {
+ raw int
+}
+
+func newFD(value int) *FD {
+ if onLeakFD != nil {
+ // Attempt to store the caller's stack for the given fd value.
+ // Panic if fds contains an existing stack for the fd.
+ old, exist := fds.LoadOrStore(value, callersFrames())
+ if exist {
+ f := old.(*runtime.Frames)
+ panic(fmt.Sprintf("found existing stack for fd %d:\n%s", value, FormatFrames(f)))
+ }
+ }
+
+ fd := &FD{value}
+ runtime.SetFinalizer(fd, (*FD).finalize)
+ return fd
+}
+
+// finalize is set as the FD's runtime finalizer and
+// sends a leak trace before calling FD.Close().
+func (fd *FD) finalize() {
+ if fd.raw < 0 {
+ return
+ }
+
+ // Invoke the fd leak callback. Calls LoadAndDelete to guarantee the callback
+ // is invoked at most once for one sys.FD allocation, runtime.Frames can only
+ // be unwound once.
+ f, ok := fds.LoadAndDelete(fd.Int())
+ if ok && onLeakFD != nil {
+ onLeakFD(f.(*runtime.Frames))
+ }
+
+ _ = fd.Close()
+}
+
+// NewFD wraps a raw fd with a finalizer.
+//
+// You must not use the raw fd after calling this function, since the underlying
+// file descriptor number may change. This is because the BPF UAPI assumes that
+// zero is not a valid fd value.
+func NewFD(value int) (*FD, error) {
+ if value < 0 {
+ return nil, fmt.Errorf("invalid fd %d", value)
+ }
+
+ fd := newFD(value)
+ if value != 0 {
+ return fd, nil
+ }
+
+ dup, err := fd.Dup()
+ _ = fd.Close()
+ return dup, err
+}
+
+func (fd *FD) String() string {
+ return strconv.FormatInt(int64(fd.raw), 10)
+}
+
+func (fd *FD) Int() int {
+ return fd.raw
+}
+
+func (fd *FD) Uint() uint32 {
+ if fd.raw < 0 || int64(fd.raw) > math.MaxUint32 {
+ // Best effort: this is the number most likely to be an invalid file
+ // descriptor. It is equal to -1 (on two's complement arches).
+ return math.MaxUint32
+ }
+ return uint32(fd.raw)
+}
+
+func (fd *FD) Close() error {
+ if fd.raw < 0 {
+ return nil
+ }
+
+ return unix.Close(fd.disown())
+}
+
+func (fd *FD) disown() int {
+ value := int(fd.raw)
+ fds.Delete(int(value))
+ fd.raw = -1
+
+ runtime.SetFinalizer(fd, nil)
+ return value
+}
+
+func (fd *FD) Dup() (*FD, error) {
+ if fd.raw < 0 {
+ return nil, ErrClosedFd
+ }
+
+ // Always require the fd to be larger than zero: the BPF API treats the value
+ // as "no argument provided".
+ dup, err := unix.FcntlInt(uintptr(fd.raw), unix.F_DUPFD_CLOEXEC, 1)
+ if err != nil {
+ return nil, fmt.Errorf("can't dup fd: %v", err)
+ }
+
+ return newFD(dup), nil
+}
+
+// File takes ownership of FD and turns it into an [*os.File].
+//
+// You must not use the FD after the call returns.
+//
+// Returns nil if the FD is not valid.
+func (fd *FD) File(name string) *os.File {
+ if fd.raw < 0 {
+ return nil
+ }
+
+ return os.NewFile(uintptr(fd.disown()), name)
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/fd_trace.go b/vendor/github.com/cilium/ebpf/internal/sys/fd_trace.go
new file mode 100644
index 000000000..cd50dd1f6
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sys/fd_trace.go
@@ -0,0 +1,93 @@
+package sys
+
+import (
+ "bytes"
+ "fmt"
+ "runtime"
+ "sync"
+)
+
+// OnLeakFD controls tracing [FD] lifetime to detect resources that are not
+// closed by Close().
+//
+// If fn is not nil, tracing is enabled for all FDs created going forward. fn is
+// invoked for all FDs that are closed by the garbage collector instead of an
+// explicit Close() by a caller. Calling OnLeakFD twice with a non-nil fn
+// (without disabling tracing in the meantime) will cause a panic.
+//
+// If fn is nil, tracing will be disabled. Any FDs that have not been closed are
+// considered to be leaked, fn will be invoked for them, and the process will be
+// terminated.
+//
+// fn will be invoked at most once for every unique sys.FD allocation since a
+// runtime.Frames can only be unwound once.
+func OnLeakFD(fn func(*runtime.Frames)) {
+ // Enable leak tracing if new fn is provided.
+ if fn != nil {
+ if onLeakFD != nil {
+ panic("OnLeakFD called twice with non-nil fn")
+ }
+
+ onLeakFD = fn
+ return
+ }
+
+ // fn is nil past this point.
+
+ if onLeakFD == nil {
+ return
+ }
+
+ // Call onLeakFD for all open fds.
+ if fs := flushFrames(); len(fs) != 0 {
+ for _, f := range fs {
+ onLeakFD(f)
+ }
+ }
+
+ onLeakFD = nil
+}
+
+var onLeakFD func(*runtime.Frames)
+
+// fds is a registry of all file descriptors wrapped into sys.fds that were
+// created while an fd tracer was active.
+var fds sync.Map // map[int]*runtime.Frames
+
+// flushFrames removes all elements from fds and returns them as a slice. This
+// deals with the fact that a runtime.Frames can only be unwound once using
+// Next().
+func flushFrames() []*runtime.Frames {
+ var frames []*runtime.Frames
+ fds.Range(func(key, value any) bool {
+ frames = append(frames, value.(*runtime.Frames))
+ fds.Delete(key)
+ return true
+ })
+ return frames
+}
+
+func callersFrames() *runtime.Frames {
+ c := make([]uintptr, 32)
+
+ // Skip runtime.Callers and this function.
+ i := runtime.Callers(2, c)
+ if i == 0 {
+ return nil
+ }
+
+ return runtime.CallersFrames(c)
+}
+
+// FormatFrames formats a runtime.Frames as a human-readable string.
+func FormatFrames(fs *runtime.Frames) string {
+ var b bytes.Buffer
+ for {
+ f, more := fs.Next()
+ b.WriteString(fmt.Sprintf("\t%s+%#x\n\t\t%s:%d\n", f.Function, f.PC-f.Entry, f.File, f.Line))
+ if !more {
+ break
+ }
+ }
+ return b.String()
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/mapflags_string.go b/vendor/github.com/cilium/ebpf/internal/sys/mapflags_string.go
new file mode 100644
index 000000000..c80744ae0
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sys/mapflags_string.go
@@ -0,0 +1,49 @@
+// Code generated by "stringer -type MapFlags"; DO NOT EDIT.
+
+package sys
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[BPF_F_NO_PREALLOC-1]
+ _ = x[BPF_F_NO_COMMON_LRU-2]
+ _ = x[BPF_F_NUMA_NODE-4]
+ _ = x[BPF_F_RDONLY-8]
+ _ = x[BPF_F_WRONLY-16]
+ _ = x[BPF_F_STACK_BUILD_ID-32]
+ _ = x[BPF_F_ZERO_SEED-64]
+ _ = x[BPF_F_RDONLY_PROG-128]
+ _ = x[BPF_F_WRONLY_PROG-256]
+ _ = x[BPF_F_CLONE-512]
+ _ = x[BPF_F_MMAPABLE-1024]
+ _ = x[BPF_F_PRESERVE_ELEMS-2048]
+ _ = x[BPF_F_INNER_MAP-4096]
+}
+
+const _MapFlags_name = "BPF_F_NO_PREALLOCBPF_F_NO_COMMON_LRUBPF_F_NUMA_NODEBPF_F_RDONLYBPF_F_WRONLYBPF_F_STACK_BUILD_IDBPF_F_ZERO_SEEDBPF_F_RDONLY_PROGBPF_F_WRONLY_PROGBPF_F_CLONEBPF_F_MMAPABLEBPF_F_PRESERVE_ELEMSBPF_F_INNER_MAP"
+
+var _MapFlags_map = map[MapFlags]string{
+ 1: _MapFlags_name[0:17],
+ 2: _MapFlags_name[17:36],
+ 4: _MapFlags_name[36:51],
+ 8: _MapFlags_name[51:63],
+ 16: _MapFlags_name[63:75],
+ 32: _MapFlags_name[75:95],
+ 64: _MapFlags_name[95:110],
+ 128: _MapFlags_name[110:127],
+ 256: _MapFlags_name[127:144],
+ 512: _MapFlags_name[144:155],
+ 1024: _MapFlags_name[155:169],
+ 2048: _MapFlags_name[169:189],
+ 4096: _MapFlags_name[189:204],
+}
+
+func (i MapFlags) String() string {
+ if str, ok := _MapFlags_map[i]; ok {
+ return str
+ }
+ return "MapFlags(" + strconv.FormatInt(int64(i), 10) + ")"
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/ptr.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr.go
new file mode 100644
index 000000000..e9bb59059
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr.go
@@ -0,0 +1,52 @@
+package sys
+
+import (
+ "unsafe"
+
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// NewPointer creates a 64-bit pointer from an unsafe Pointer.
+func NewPointer(ptr unsafe.Pointer) Pointer {
+ return Pointer{ptr: ptr}
+}
+
+// NewSlicePointer creates a 64-bit pointer from a byte slice.
+func NewSlicePointer(buf []byte) Pointer {
+ if len(buf) == 0 {
+ return Pointer{}
+ }
+
+ return Pointer{ptr: unsafe.Pointer(&buf[0])}
+}
+
+// NewSlicePointerLen creates a 64-bit pointer from a byte slice.
+//
+// Useful to assign both the pointer and the length in one go.
+func NewSlicePointerLen(buf []byte) (Pointer, uint32) {
+ return NewSlicePointer(buf), uint32(len(buf))
+}
+
+// NewStringPointer creates a 64-bit pointer from a string.
+func NewStringPointer(str string) Pointer {
+ p, err := unix.BytePtrFromString(str)
+ if err != nil {
+ return Pointer{}
+ }
+
+ return Pointer{ptr: unsafe.Pointer(p)}
+}
+
+// NewStringSlicePointer allocates an array of Pointers to each string in the
+// given slice of strings and returns a 64-bit pointer to the start of the
+// resulting array.
+//
+// Use this function to pass arrays of strings as syscall arguments.
+func NewStringSlicePointer(strings []string) Pointer {
+ sp := make([]Pointer, 0, len(strings))
+ for _, s := range strings {
+ sp = append(sp, NewStringPointer(s))
+ }
+
+ return Pointer{ptr: unsafe.Pointer(&sp[0])}
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go
new file mode 100644
index 000000000..6278c79c9
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go
@@ -0,0 +1,14 @@
+//go:build armbe || mips || mips64p32
+
+package sys
+
+import (
+ "unsafe"
+)
+
+// Pointer wraps an unsafe.Pointer to be 64bit to
+// conform to the syscall specification.
+type Pointer struct {
+ pad uint32
+ ptr unsafe.Pointer
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go
new file mode 100644
index 000000000..c27b537e8
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go
@@ -0,0 +1,14 @@
+//go:build 386 || amd64p32 || arm || mipsle || mips64p32le
+
+package sys
+
+import (
+ "unsafe"
+)
+
+// Pointer wraps an unsafe.Pointer to be 64bit to
+// conform to the syscall specification.
+type Pointer struct {
+ ptr unsafe.Pointer
+ pad uint32
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go
new file mode 100644
index 000000000..2d7828230
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go
@@ -0,0 +1,13 @@
+//go:build !386 && !amd64p32 && !arm && !mipsle && !mips64p32le && !armbe && !mips && !mips64p32
+
+package sys
+
+import (
+ "unsafe"
+)
+
+// Pointer wraps an unsafe.Pointer to be 64bit to
+// conform to the syscall specification.
+type Pointer struct {
+ ptr unsafe.Pointer
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/signals.go b/vendor/github.com/cilium/ebpf/internal/sys/signals.go
new file mode 100644
index 000000000..7494c030c
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sys/signals.go
@@ -0,0 +1,83 @@
+package sys
+
+import (
+ "fmt"
+ "runtime"
+ "unsafe"
+
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// A sigset containing only SIGPROF.
+var profSet unix.Sigset_t
+
+func init() {
+ // See sigsetAdd for details on the implementation. Open coded here so
+ // that the compiler will check the constant calculations for us.
+ profSet.Val[sigprofBit/wordBits] |= 1 << (sigprofBit % wordBits)
+}
+
+// maskProfilerSignal locks the calling goroutine to its underlying OS thread
+// and adds SIGPROF to the thread's signal mask. This prevents pprof from
+// interrupting expensive syscalls like e.g. BPF_PROG_LOAD.
+//
+// The caller must defer unmaskProfilerSignal() to reverse the operation.
+func maskProfilerSignal() {
+ runtime.LockOSThread()
+
+ if err := unix.PthreadSigmask(unix.SIG_BLOCK, &profSet, nil); err != nil {
+ runtime.UnlockOSThread()
+ panic(fmt.Errorf("masking profiler signal: %w", err))
+ }
+}
+
+// unmaskProfilerSignal removes SIGPROF from the underlying thread's signal
+// mask, allowing it to be interrupted for profiling once again.
+//
+// It also unlocks the current goroutine from its underlying OS thread.
+func unmaskProfilerSignal() {
+ defer runtime.UnlockOSThread()
+
+ if err := unix.PthreadSigmask(unix.SIG_UNBLOCK, &profSet, nil); err != nil {
+ panic(fmt.Errorf("unmasking profiler signal: %w", err))
+ }
+}
+
+const (
+ // Signal is the nth bit in the bitfield.
+ sigprofBit = int(unix.SIGPROF - 1)
+ // The number of bits in one Sigset_t word.
+ wordBits = int(unsafe.Sizeof(unix.Sigset_t{}.Val[0])) * 8
+)
+
+// sigsetAdd adds signal to set.
+//
+// Note: Sigset_t.Val's value type is uint32 or uint64 depending on the arch.
+// This function must be able to deal with both and so must avoid any direct
+// references to u32 or u64 types.
+func sigsetAdd(set *unix.Sigset_t, signal unix.Signal) error {
+ if signal < 1 {
+ return fmt.Errorf("signal %d must be larger than 0", signal)
+ }
+
+ // For amd64, runtime.sigaddset() performs the following operation:
+ // set[(signal-1)/32] |= 1 << ((uint32(signal) - 1) & 31)
+ //
+ // This trick depends on sigset being two u32's, causing a signal in the
+ // bottom 31 bits to be written to the low word if bit 32 is low, or the high
+ // word if bit 32 is high.
+
+ // Signal is the nth bit in the bitfield.
+ bit := int(signal - 1)
+ // Word within the sigset the bit needs to be written to.
+ word := bit / wordBits
+
+ if word >= len(set.Val) {
+ return fmt.Errorf("signal %d does not fit within unix.Sigset_t", signal)
+ }
+
+ // Write the signal bit into its corresponding word at the corrected offset.
+ set.Val[word] |= 1 << (bit % wordBits)
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/syscall.go b/vendor/github.com/cilium/ebpf/internal/sys/syscall.go
new file mode 100644
index 000000000..088e82eea
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sys/syscall.go
@@ -0,0 +1,178 @@
+package sys
+
+import (
+ "runtime"
+ "syscall"
+ "unsafe"
+
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// ENOTSUPP is a Linux internal error code that has leaked into UAPI.
+//
+// It is not the same as ENOTSUP or EOPNOTSUPP.
+const ENOTSUPP = syscall.Errno(524)
+
+// BPF wraps SYS_BPF.
+//
+// Any pointers contained in attr must use the Pointer type from this package.
+func BPF(cmd Cmd, attr unsafe.Pointer, size uintptr) (uintptr, error) {
+ // Prevent the Go profiler from repeatedly interrupting the verifier,
+ // which could otherwise lead to a livelock due to receiving EAGAIN.
+ if cmd == BPF_PROG_LOAD || cmd == BPF_PROG_RUN {
+ maskProfilerSignal()
+ defer unmaskProfilerSignal()
+ }
+
+ for {
+ r1, _, errNo := unix.Syscall(unix.SYS_BPF, uintptr(cmd), uintptr(attr), size)
+ runtime.KeepAlive(attr)
+
+ // As of ~4.20 the verifier can be interrupted by a signal,
+ // and returns EAGAIN in that case.
+ if errNo == unix.EAGAIN && cmd == BPF_PROG_LOAD {
+ continue
+ }
+
+ var err error
+ if errNo != 0 {
+ err = wrappedErrno{errNo}
+ }
+
+ return r1, err
+ }
+}
+
+// Info is implemented by all structs that can be passed to the ObjInfo syscall.
+//
+// MapInfo
+// ProgInfo
+// LinkInfo
+// BtfInfo
+type Info interface {
+ info() (unsafe.Pointer, uint32)
+}
+
+var _ Info = (*MapInfo)(nil)
+
+func (i *MapInfo) info() (unsafe.Pointer, uint32) {
+ return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
+}
+
+var _ Info = (*ProgInfo)(nil)
+
+func (i *ProgInfo) info() (unsafe.Pointer, uint32) {
+ return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
+}
+
+var _ Info = (*LinkInfo)(nil)
+
+func (i *LinkInfo) info() (unsafe.Pointer, uint32) {
+ return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
+}
+
+var _ Info = (*BtfInfo)(nil)
+
+func (i *BtfInfo) info() (unsafe.Pointer, uint32) {
+ return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i))
+}
+
+// ObjInfo retrieves information about a BPF Fd.
+//
+// info may be one of MapInfo, ProgInfo, LinkInfo and BtfInfo.
+func ObjInfo(fd *FD, info Info) error {
+ ptr, len := info.info()
+ err := ObjGetInfoByFd(&ObjGetInfoByFdAttr{
+ BpfFd: fd.Uint(),
+ InfoLen: len,
+ Info: NewPointer(ptr),
+ })
+ runtime.KeepAlive(fd)
+ return err
+}
+
+// BPFObjName is a null-terminated string made up of
+// 'A-Za-z0-9_' characters.
+type ObjName [unix.BPF_OBJ_NAME_LEN]byte
+
+// NewObjName truncates the result if it is too long.
+func NewObjName(name string) ObjName {
+ var result ObjName
+ copy(result[:unix.BPF_OBJ_NAME_LEN-1], name)
+ return result
+}
+
+// LogLevel controls the verbosity of the kernel's eBPF program verifier.
+type LogLevel uint32
+
+const (
+ BPF_LOG_LEVEL1 LogLevel = 1 << iota
+ BPF_LOG_LEVEL2
+ BPF_LOG_STATS
+)
+
+// LinkID uniquely identifies a bpf_link.
+type LinkID uint32
+
+// BTFID uniquely identifies a BTF blob loaded into the kernel.
+type BTFID uint32
+
+// TypeID identifies a type in a BTF blob.
+type TypeID uint32
+
+// MapFlags control map behaviour.
+type MapFlags uint32
+
+//go:generate go run golang.org/x/tools/cmd/stringer@latest -type MapFlags
+
+const (
+ BPF_F_NO_PREALLOC MapFlags = 1 << iota
+ BPF_F_NO_COMMON_LRU
+ BPF_F_NUMA_NODE
+ BPF_F_RDONLY
+ BPF_F_WRONLY
+ BPF_F_STACK_BUILD_ID
+ BPF_F_ZERO_SEED
+ BPF_F_RDONLY_PROG
+ BPF_F_WRONLY_PROG
+ BPF_F_CLONE
+ BPF_F_MMAPABLE
+ BPF_F_PRESERVE_ELEMS
+ BPF_F_INNER_MAP
+)
+
+// wrappedErrno wraps syscall.Errno to prevent direct comparisons with
+// syscall.E* or unix.E* constants.
+//
+// You should never export an error of this type.
+type wrappedErrno struct {
+ syscall.Errno
+}
+
+func (we wrappedErrno) Unwrap() error {
+ return we.Errno
+}
+
+func (we wrappedErrno) Error() string {
+ if we.Errno == ENOTSUPP {
+ return "operation not supported"
+ }
+ return we.Errno.Error()
+}
+
+type syscallError struct {
+ error
+ errno syscall.Errno
+}
+
+func Error(err error, errno syscall.Errno) error {
+ return &syscallError{err, errno}
+}
+
+func (se *syscallError) Is(target error) bool {
+ return target == se.error
+}
+
+func (se *syscallError) Unwrap() error {
+ return se.errno
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/sys/types.go b/vendor/github.com/cilium/ebpf/internal/sys/types.go
new file mode 100644
index 000000000..b7e3244ad
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sys/types.go
@@ -0,0 +1,1129 @@
+// Code generated by internal/cmd/gentypes; DO NOT EDIT.
+
+package sys
+
+import (
+ "unsafe"
+)
+
+type AdjRoomMode uint32
+
+const (
+ BPF_ADJ_ROOM_NET AdjRoomMode = 0
+ BPF_ADJ_ROOM_MAC AdjRoomMode = 1
+)
+
+type AttachType uint32
+
+const (
+ BPF_CGROUP_INET_INGRESS AttachType = 0
+ BPF_CGROUP_INET_EGRESS AttachType = 1
+ BPF_CGROUP_INET_SOCK_CREATE AttachType = 2
+ BPF_CGROUP_SOCK_OPS AttachType = 3
+ BPF_SK_SKB_STREAM_PARSER AttachType = 4
+ BPF_SK_SKB_STREAM_VERDICT AttachType = 5
+ BPF_CGROUP_DEVICE AttachType = 6
+ BPF_SK_MSG_VERDICT AttachType = 7
+ BPF_CGROUP_INET4_BIND AttachType = 8
+ BPF_CGROUP_INET6_BIND AttachType = 9
+ BPF_CGROUP_INET4_CONNECT AttachType = 10
+ BPF_CGROUP_INET6_CONNECT AttachType = 11
+ BPF_CGROUP_INET4_POST_BIND AttachType = 12
+ BPF_CGROUP_INET6_POST_BIND AttachType = 13
+ BPF_CGROUP_UDP4_SENDMSG AttachType = 14
+ BPF_CGROUP_UDP6_SENDMSG AttachType = 15
+ BPF_LIRC_MODE2 AttachType = 16
+ BPF_FLOW_DISSECTOR AttachType = 17
+ BPF_CGROUP_SYSCTL AttachType = 18
+ BPF_CGROUP_UDP4_RECVMSG AttachType = 19
+ BPF_CGROUP_UDP6_RECVMSG AttachType = 20
+ BPF_CGROUP_GETSOCKOPT AttachType = 21
+ BPF_CGROUP_SETSOCKOPT AttachType = 22
+ BPF_TRACE_RAW_TP AttachType = 23
+ BPF_TRACE_FENTRY AttachType = 24
+ BPF_TRACE_FEXIT AttachType = 25
+ BPF_MODIFY_RETURN AttachType = 26
+ BPF_LSM_MAC AttachType = 27
+ BPF_TRACE_ITER AttachType = 28
+ BPF_CGROUP_INET4_GETPEERNAME AttachType = 29
+ BPF_CGROUP_INET6_GETPEERNAME AttachType = 30
+ BPF_CGROUP_INET4_GETSOCKNAME AttachType = 31
+ BPF_CGROUP_INET6_GETSOCKNAME AttachType = 32
+ BPF_XDP_DEVMAP AttachType = 33
+ BPF_CGROUP_INET_SOCK_RELEASE AttachType = 34
+ BPF_XDP_CPUMAP AttachType = 35
+ BPF_SK_LOOKUP AttachType = 36
+ BPF_XDP AttachType = 37
+ BPF_SK_SKB_VERDICT AttachType = 38
+ BPF_SK_REUSEPORT_SELECT AttachType = 39
+ BPF_SK_REUSEPORT_SELECT_OR_MIGRATE AttachType = 40
+ BPF_PERF_EVENT AttachType = 41
+ BPF_TRACE_KPROBE_MULTI AttachType = 42
+ BPF_LSM_CGROUP AttachType = 43
+ __MAX_BPF_ATTACH_TYPE AttachType = 44
+)
+
+type Cmd uint32
+
+const (
+ BPF_MAP_CREATE Cmd = 0
+ BPF_MAP_LOOKUP_ELEM Cmd = 1
+ BPF_MAP_UPDATE_ELEM Cmd = 2
+ BPF_MAP_DELETE_ELEM Cmd = 3
+ BPF_MAP_GET_NEXT_KEY Cmd = 4
+ BPF_PROG_LOAD Cmd = 5
+ BPF_OBJ_PIN Cmd = 6
+ BPF_OBJ_GET Cmd = 7
+ BPF_PROG_ATTACH Cmd = 8
+ BPF_PROG_DETACH Cmd = 9
+ BPF_PROG_TEST_RUN Cmd = 10
+ BPF_PROG_RUN Cmd = 10
+ BPF_PROG_GET_NEXT_ID Cmd = 11
+ BPF_MAP_GET_NEXT_ID Cmd = 12
+ BPF_PROG_GET_FD_BY_ID Cmd = 13
+ BPF_MAP_GET_FD_BY_ID Cmd = 14
+ BPF_OBJ_GET_INFO_BY_FD Cmd = 15
+ BPF_PROG_QUERY Cmd = 16
+ BPF_RAW_TRACEPOINT_OPEN Cmd = 17
+ BPF_BTF_LOAD Cmd = 18
+ BPF_BTF_GET_FD_BY_ID Cmd = 19
+ BPF_TASK_FD_QUERY Cmd = 20
+ BPF_MAP_LOOKUP_AND_DELETE_ELEM Cmd = 21
+ BPF_MAP_FREEZE Cmd = 22
+ BPF_BTF_GET_NEXT_ID Cmd = 23
+ BPF_MAP_LOOKUP_BATCH Cmd = 24
+ BPF_MAP_LOOKUP_AND_DELETE_BATCH Cmd = 25
+ BPF_MAP_UPDATE_BATCH Cmd = 26
+ BPF_MAP_DELETE_BATCH Cmd = 27
+ BPF_LINK_CREATE Cmd = 28
+ BPF_LINK_UPDATE Cmd = 29
+ BPF_LINK_GET_FD_BY_ID Cmd = 30
+ BPF_LINK_GET_NEXT_ID Cmd = 31
+ BPF_ENABLE_STATS Cmd = 32
+ BPF_ITER_CREATE Cmd = 33
+ BPF_LINK_DETACH Cmd = 34
+ BPF_PROG_BIND_MAP Cmd = 35
+)
+
+type FunctionId uint32
+
+const (
+ BPF_FUNC_unspec FunctionId = 0
+ BPF_FUNC_map_lookup_elem FunctionId = 1
+ BPF_FUNC_map_update_elem FunctionId = 2
+ BPF_FUNC_map_delete_elem FunctionId = 3
+ BPF_FUNC_probe_read FunctionId = 4
+ BPF_FUNC_ktime_get_ns FunctionId = 5
+ BPF_FUNC_trace_printk FunctionId = 6
+ BPF_FUNC_get_prandom_u32 FunctionId = 7
+ BPF_FUNC_get_smp_processor_id FunctionId = 8
+ BPF_FUNC_skb_store_bytes FunctionId = 9
+ BPF_FUNC_l3_csum_replace FunctionId = 10
+ BPF_FUNC_l4_csum_replace FunctionId = 11
+ BPF_FUNC_tail_call FunctionId = 12
+ BPF_FUNC_clone_redirect FunctionId = 13
+ BPF_FUNC_get_current_pid_tgid FunctionId = 14
+ BPF_FUNC_get_current_uid_gid FunctionId = 15
+ BPF_FUNC_get_current_comm FunctionId = 16
+ BPF_FUNC_get_cgroup_classid FunctionId = 17
+ BPF_FUNC_skb_vlan_push FunctionId = 18
+ BPF_FUNC_skb_vlan_pop FunctionId = 19
+ BPF_FUNC_skb_get_tunnel_key FunctionId = 20
+ BPF_FUNC_skb_set_tunnel_key FunctionId = 21
+ BPF_FUNC_perf_event_read FunctionId = 22
+ BPF_FUNC_redirect FunctionId = 23
+ BPF_FUNC_get_route_realm FunctionId = 24
+ BPF_FUNC_perf_event_output FunctionId = 25
+ BPF_FUNC_skb_load_bytes FunctionId = 26
+ BPF_FUNC_get_stackid FunctionId = 27
+ BPF_FUNC_csum_diff FunctionId = 28
+ BPF_FUNC_skb_get_tunnel_opt FunctionId = 29
+ BPF_FUNC_skb_set_tunnel_opt FunctionId = 30
+ BPF_FUNC_skb_change_proto FunctionId = 31
+ BPF_FUNC_skb_change_type FunctionId = 32
+ BPF_FUNC_skb_under_cgroup FunctionId = 33
+ BPF_FUNC_get_hash_recalc FunctionId = 34
+ BPF_FUNC_get_current_task FunctionId = 35
+ BPF_FUNC_probe_write_user FunctionId = 36
+ BPF_FUNC_current_task_under_cgroup FunctionId = 37
+ BPF_FUNC_skb_change_tail FunctionId = 38
+ BPF_FUNC_skb_pull_data FunctionId = 39
+ BPF_FUNC_csum_update FunctionId = 40
+ BPF_FUNC_set_hash_invalid FunctionId = 41
+ BPF_FUNC_get_numa_node_id FunctionId = 42
+ BPF_FUNC_skb_change_head FunctionId = 43
+ BPF_FUNC_xdp_adjust_head FunctionId = 44
+ BPF_FUNC_probe_read_str FunctionId = 45
+ BPF_FUNC_get_socket_cookie FunctionId = 46
+ BPF_FUNC_get_socket_uid FunctionId = 47
+ BPF_FUNC_set_hash FunctionId = 48
+ BPF_FUNC_setsockopt FunctionId = 49
+ BPF_FUNC_skb_adjust_room FunctionId = 50
+ BPF_FUNC_redirect_map FunctionId = 51
+ BPF_FUNC_sk_redirect_map FunctionId = 52
+ BPF_FUNC_sock_map_update FunctionId = 53
+ BPF_FUNC_xdp_adjust_meta FunctionId = 54
+ BPF_FUNC_perf_event_read_value FunctionId = 55
+ BPF_FUNC_perf_prog_read_value FunctionId = 56
+ BPF_FUNC_getsockopt FunctionId = 57
+ BPF_FUNC_override_return FunctionId = 58
+ BPF_FUNC_sock_ops_cb_flags_set FunctionId = 59
+ BPF_FUNC_msg_redirect_map FunctionId = 60
+ BPF_FUNC_msg_apply_bytes FunctionId = 61
+ BPF_FUNC_msg_cork_bytes FunctionId = 62
+ BPF_FUNC_msg_pull_data FunctionId = 63
+ BPF_FUNC_bind FunctionId = 64
+ BPF_FUNC_xdp_adjust_tail FunctionId = 65
+ BPF_FUNC_skb_get_xfrm_state FunctionId = 66
+ BPF_FUNC_get_stack FunctionId = 67
+ BPF_FUNC_skb_load_bytes_relative FunctionId = 68
+ BPF_FUNC_fib_lookup FunctionId = 69
+ BPF_FUNC_sock_hash_update FunctionId = 70
+ BPF_FUNC_msg_redirect_hash FunctionId = 71
+ BPF_FUNC_sk_redirect_hash FunctionId = 72
+ BPF_FUNC_lwt_push_encap FunctionId = 73
+ BPF_FUNC_lwt_seg6_store_bytes FunctionId = 74
+ BPF_FUNC_lwt_seg6_adjust_srh FunctionId = 75
+ BPF_FUNC_lwt_seg6_action FunctionId = 76
+ BPF_FUNC_rc_repeat FunctionId = 77
+ BPF_FUNC_rc_keydown FunctionId = 78
+ BPF_FUNC_skb_cgroup_id FunctionId = 79
+ BPF_FUNC_get_current_cgroup_id FunctionId = 80
+ BPF_FUNC_get_local_storage FunctionId = 81
+ BPF_FUNC_sk_select_reuseport FunctionId = 82
+ BPF_FUNC_skb_ancestor_cgroup_id FunctionId = 83
+ BPF_FUNC_sk_lookup_tcp FunctionId = 84
+ BPF_FUNC_sk_lookup_udp FunctionId = 85
+ BPF_FUNC_sk_release FunctionId = 86
+ BPF_FUNC_map_push_elem FunctionId = 87
+ BPF_FUNC_map_pop_elem FunctionId = 88
+ BPF_FUNC_map_peek_elem FunctionId = 89
+ BPF_FUNC_msg_push_data FunctionId = 90
+ BPF_FUNC_msg_pop_data FunctionId = 91
+ BPF_FUNC_rc_pointer_rel FunctionId = 92
+ BPF_FUNC_spin_lock FunctionId = 93
+ BPF_FUNC_spin_unlock FunctionId = 94
+ BPF_FUNC_sk_fullsock FunctionId = 95
+ BPF_FUNC_tcp_sock FunctionId = 96
+ BPF_FUNC_skb_ecn_set_ce FunctionId = 97
+ BPF_FUNC_get_listener_sock FunctionId = 98
+ BPF_FUNC_skc_lookup_tcp FunctionId = 99
+ BPF_FUNC_tcp_check_syncookie FunctionId = 100
+ BPF_FUNC_sysctl_get_name FunctionId = 101
+ BPF_FUNC_sysctl_get_current_value FunctionId = 102
+ BPF_FUNC_sysctl_get_new_value FunctionId = 103
+ BPF_FUNC_sysctl_set_new_value FunctionId = 104
+ BPF_FUNC_strtol FunctionId = 105
+ BPF_FUNC_strtoul FunctionId = 106
+ BPF_FUNC_sk_storage_get FunctionId = 107
+ BPF_FUNC_sk_storage_delete FunctionId = 108
+ BPF_FUNC_send_signal FunctionId = 109
+ BPF_FUNC_tcp_gen_syncookie FunctionId = 110
+ BPF_FUNC_skb_output FunctionId = 111
+ BPF_FUNC_probe_read_user FunctionId = 112
+ BPF_FUNC_probe_read_kernel FunctionId = 113
+ BPF_FUNC_probe_read_user_str FunctionId = 114
+ BPF_FUNC_probe_read_kernel_str FunctionId = 115
+ BPF_FUNC_tcp_send_ack FunctionId = 116
+ BPF_FUNC_send_signal_thread FunctionId = 117
+ BPF_FUNC_jiffies64 FunctionId = 118
+ BPF_FUNC_read_branch_records FunctionId = 119
+ BPF_FUNC_get_ns_current_pid_tgid FunctionId = 120
+ BPF_FUNC_xdp_output FunctionId = 121
+ BPF_FUNC_get_netns_cookie FunctionId = 122
+ BPF_FUNC_get_current_ancestor_cgroup_id FunctionId = 123
+ BPF_FUNC_sk_assign FunctionId = 124
+ BPF_FUNC_ktime_get_boot_ns FunctionId = 125
+ BPF_FUNC_seq_printf FunctionId = 126
+ BPF_FUNC_seq_write FunctionId = 127
+ BPF_FUNC_sk_cgroup_id FunctionId = 128
+ BPF_FUNC_sk_ancestor_cgroup_id FunctionId = 129
+ BPF_FUNC_ringbuf_output FunctionId = 130
+ BPF_FUNC_ringbuf_reserve FunctionId = 131
+ BPF_FUNC_ringbuf_submit FunctionId = 132
+ BPF_FUNC_ringbuf_discard FunctionId = 133
+ BPF_FUNC_ringbuf_query FunctionId = 134
+ BPF_FUNC_csum_level FunctionId = 135
+ BPF_FUNC_skc_to_tcp6_sock FunctionId = 136
+ BPF_FUNC_skc_to_tcp_sock FunctionId = 137
+ BPF_FUNC_skc_to_tcp_timewait_sock FunctionId = 138
+ BPF_FUNC_skc_to_tcp_request_sock FunctionId = 139
+ BPF_FUNC_skc_to_udp6_sock FunctionId = 140
+ BPF_FUNC_get_task_stack FunctionId = 141
+ BPF_FUNC_load_hdr_opt FunctionId = 142
+ BPF_FUNC_store_hdr_opt FunctionId = 143
+ BPF_FUNC_reserve_hdr_opt FunctionId = 144
+ BPF_FUNC_inode_storage_get FunctionId = 145
+ BPF_FUNC_inode_storage_delete FunctionId = 146
+ BPF_FUNC_d_path FunctionId = 147
+ BPF_FUNC_copy_from_user FunctionId = 148
+ BPF_FUNC_snprintf_btf FunctionId = 149
+ BPF_FUNC_seq_printf_btf FunctionId = 150
+ BPF_FUNC_skb_cgroup_classid FunctionId = 151
+ BPF_FUNC_redirect_neigh FunctionId = 152
+ BPF_FUNC_per_cpu_ptr FunctionId = 153
+ BPF_FUNC_this_cpu_ptr FunctionId = 154
+ BPF_FUNC_redirect_peer FunctionId = 155
+ BPF_FUNC_task_storage_get FunctionId = 156
+ BPF_FUNC_task_storage_delete FunctionId = 157
+ BPF_FUNC_get_current_task_btf FunctionId = 158
+ BPF_FUNC_bprm_opts_set FunctionId = 159
+ BPF_FUNC_ktime_get_coarse_ns FunctionId = 160
+ BPF_FUNC_ima_inode_hash FunctionId = 161
+ BPF_FUNC_sock_from_file FunctionId = 162
+ BPF_FUNC_check_mtu FunctionId = 163
+ BPF_FUNC_for_each_map_elem FunctionId = 164
+ BPF_FUNC_snprintf FunctionId = 165
+ BPF_FUNC_sys_bpf FunctionId = 166
+ BPF_FUNC_btf_find_by_name_kind FunctionId = 167
+ BPF_FUNC_sys_close FunctionId = 168
+ BPF_FUNC_timer_init FunctionId = 169
+ BPF_FUNC_timer_set_callback FunctionId = 170
+ BPF_FUNC_timer_start FunctionId = 171
+ BPF_FUNC_timer_cancel FunctionId = 172
+ BPF_FUNC_get_func_ip FunctionId = 173
+ BPF_FUNC_get_attach_cookie FunctionId = 174
+ BPF_FUNC_task_pt_regs FunctionId = 175
+ BPF_FUNC_get_branch_snapshot FunctionId = 176
+ BPF_FUNC_trace_vprintk FunctionId = 177
+ BPF_FUNC_skc_to_unix_sock FunctionId = 178
+ BPF_FUNC_kallsyms_lookup_name FunctionId = 179
+ BPF_FUNC_find_vma FunctionId = 180
+ BPF_FUNC_loop FunctionId = 181
+ BPF_FUNC_strncmp FunctionId = 182
+ BPF_FUNC_get_func_arg FunctionId = 183
+ BPF_FUNC_get_func_ret FunctionId = 184
+ BPF_FUNC_get_func_arg_cnt FunctionId = 185
+ BPF_FUNC_get_retval FunctionId = 186
+ BPF_FUNC_set_retval FunctionId = 187
+ BPF_FUNC_xdp_get_buff_len FunctionId = 188
+ BPF_FUNC_xdp_load_bytes FunctionId = 189
+ BPF_FUNC_xdp_store_bytes FunctionId = 190
+ BPF_FUNC_copy_from_user_task FunctionId = 191
+ BPF_FUNC_skb_set_tstamp FunctionId = 192
+ BPF_FUNC_ima_file_hash FunctionId = 193
+ BPF_FUNC_kptr_xchg FunctionId = 194
+ BPF_FUNC_map_lookup_percpu_elem FunctionId = 195
+ BPF_FUNC_skc_to_mptcp_sock FunctionId = 196
+ BPF_FUNC_dynptr_from_mem FunctionId = 197
+ BPF_FUNC_ringbuf_reserve_dynptr FunctionId = 198
+ BPF_FUNC_ringbuf_submit_dynptr FunctionId = 199
+ BPF_FUNC_ringbuf_discard_dynptr FunctionId = 200
+ BPF_FUNC_dynptr_read FunctionId = 201
+ BPF_FUNC_dynptr_write FunctionId = 202
+ BPF_FUNC_dynptr_data FunctionId = 203
+ BPF_FUNC_tcp_raw_gen_syncookie_ipv4 FunctionId = 204
+ BPF_FUNC_tcp_raw_gen_syncookie_ipv6 FunctionId = 205
+ BPF_FUNC_tcp_raw_check_syncookie_ipv4 FunctionId = 206
+ BPF_FUNC_tcp_raw_check_syncookie_ipv6 FunctionId = 207
+ BPF_FUNC_ktime_get_tai_ns FunctionId = 208
+ BPF_FUNC_user_ringbuf_drain FunctionId = 209
+ __BPF_FUNC_MAX_ID FunctionId = 210
+)
+
+type HdrStartOff uint32
+
+const (
+ BPF_HDR_START_MAC HdrStartOff = 0
+ BPF_HDR_START_NET HdrStartOff = 1
+)
+
+type LinkType uint32
+
+const (
+ BPF_LINK_TYPE_UNSPEC LinkType = 0
+ BPF_LINK_TYPE_RAW_TRACEPOINT LinkType = 1
+ BPF_LINK_TYPE_TRACING LinkType = 2
+ BPF_LINK_TYPE_CGROUP LinkType = 3
+ BPF_LINK_TYPE_ITER LinkType = 4
+ BPF_LINK_TYPE_NETNS LinkType = 5
+ BPF_LINK_TYPE_XDP LinkType = 6
+ BPF_LINK_TYPE_PERF_EVENT LinkType = 7
+ BPF_LINK_TYPE_KPROBE_MULTI LinkType = 8
+ BPF_LINK_TYPE_STRUCT_OPS LinkType = 9
+ MAX_BPF_LINK_TYPE LinkType = 10
+)
+
+type MapType uint32
+
+const (
+ BPF_MAP_TYPE_UNSPEC MapType = 0
+ BPF_MAP_TYPE_HASH MapType = 1
+ BPF_MAP_TYPE_ARRAY MapType = 2
+ BPF_MAP_TYPE_PROG_ARRAY MapType = 3
+ BPF_MAP_TYPE_PERF_EVENT_ARRAY MapType = 4
+ BPF_MAP_TYPE_PERCPU_HASH MapType = 5
+ BPF_MAP_TYPE_PERCPU_ARRAY MapType = 6
+ BPF_MAP_TYPE_STACK_TRACE MapType = 7
+ BPF_MAP_TYPE_CGROUP_ARRAY MapType = 8
+ BPF_MAP_TYPE_LRU_HASH MapType = 9
+ BPF_MAP_TYPE_LRU_PERCPU_HASH MapType = 10
+ BPF_MAP_TYPE_LPM_TRIE MapType = 11
+ BPF_MAP_TYPE_ARRAY_OF_MAPS MapType = 12
+ BPF_MAP_TYPE_HASH_OF_MAPS MapType = 13
+ BPF_MAP_TYPE_DEVMAP MapType = 14
+ BPF_MAP_TYPE_SOCKMAP MapType = 15
+ BPF_MAP_TYPE_CPUMAP MapType = 16
+ BPF_MAP_TYPE_XSKMAP MapType = 17
+ BPF_MAP_TYPE_SOCKHASH MapType = 18
+ BPF_MAP_TYPE_CGROUP_STORAGE MapType = 19
+ BPF_MAP_TYPE_REUSEPORT_SOCKARRAY MapType = 20
+ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE MapType = 21
+ BPF_MAP_TYPE_QUEUE MapType = 22
+ BPF_MAP_TYPE_STACK MapType = 23
+ BPF_MAP_TYPE_SK_STORAGE MapType = 24
+ BPF_MAP_TYPE_DEVMAP_HASH MapType = 25
+ BPF_MAP_TYPE_STRUCT_OPS MapType = 26
+ BPF_MAP_TYPE_RINGBUF MapType = 27
+ BPF_MAP_TYPE_INODE_STORAGE MapType = 28
+ BPF_MAP_TYPE_TASK_STORAGE MapType = 29
+ BPF_MAP_TYPE_BLOOM_FILTER MapType = 30
+ BPF_MAP_TYPE_USER_RINGBUF MapType = 31
+)
+
+type ProgType uint32
+
+const (
+ BPF_PROG_TYPE_UNSPEC ProgType = 0
+ BPF_PROG_TYPE_SOCKET_FILTER ProgType = 1
+ BPF_PROG_TYPE_KPROBE ProgType = 2
+ BPF_PROG_TYPE_SCHED_CLS ProgType = 3
+ BPF_PROG_TYPE_SCHED_ACT ProgType = 4
+ BPF_PROG_TYPE_TRACEPOINT ProgType = 5
+ BPF_PROG_TYPE_XDP ProgType = 6
+ BPF_PROG_TYPE_PERF_EVENT ProgType = 7
+ BPF_PROG_TYPE_CGROUP_SKB ProgType = 8
+ BPF_PROG_TYPE_CGROUP_SOCK ProgType = 9
+ BPF_PROG_TYPE_LWT_IN ProgType = 10
+ BPF_PROG_TYPE_LWT_OUT ProgType = 11
+ BPF_PROG_TYPE_LWT_XMIT ProgType = 12
+ BPF_PROG_TYPE_SOCK_OPS ProgType = 13
+ BPF_PROG_TYPE_SK_SKB ProgType = 14
+ BPF_PROG_TYPE_CGROUP_DEVICE ProgType = 15
+ BPF_PROG_TYPE_SK_MSG ProgType = 16
+ BPF_PROG_TYPE_RAW_TRACEPOINT ProgType = 17
+ BPF_PROG_TYPE_CGROUP_SOCK_ADDR ProgType = 18
+ BPF_PROG_TYPE_LWT_SEG6LOCAL ProgType = 19
+ BPF_PROG_TYPE_LIRC_MODE2 ProgType = 20
+ BPF_PROG_TYPE_SK_REUSEPORT ProgType = 21
+ BPF_PROG_TYPE_FLOW_DISSECTOR ProgType = 22
+ BPF_PROG_TYPE_CGROUP_SYSCTL ProgType = 23
+ BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE ProgType = 24
+ BPF_PROG_TYPE_CGROUP_SOCKOPT ProgType = 25
+ BPF_PROG_TYPE_TRACING ProgType = 26
+ BPF_PROG_TYPE_STRUCT_OPS ProgType = 27
+ BPF_PROG_TYPE_EXT ProgType = 28
+ BPF_PROG_TYPE_LSM ProgType = 29
+ BPF_PROG_TYPE_SK_LOOKUP ProgType = 30
+ BPF_PROG_TYPE_SYSCALL ProgType = 31
+)
+
+type RetCode uint32
+
+const (
+ BPF_OK RetCode = 0
+ BPF_DROP RetCode = 2
+ BPF_REDIRECT RetCode = 7
+ BPF_LWT_REROUTE RetCode = 128
+ BPF_FLOW_DISSECTOR_CONTINUE RetCode = 129
+)
+
+type SkAction uint32
+
+const (
+ SK_DROP SkAction = 0
+ SK_PASS SkAction = 1
+)
+
+type StackBuildIdStatus uint32
+
+const (
+ BPF_STACK_BUILD_ID_EMPTY StackBuildIdStatus = 0
+ BPF_STACK_BUILD_ID_VALID StackBuildIdStatus = 1
+ BPF_STACK_BUILD_ID_IP StackBuildIdStatus = 2
+)
+
+type StatsType uint32
+
+const (
+ BPF_STATS_RUN_TIME StatsType = 0
+)
+
+type XdpAction uint32
+
+const (
+ XDP_ABORTED XdpAction = 0
+ XDP_DROP XdpAction = 1
+ XDP_PASS XdpAction = 2
+ XDP_TX XdpAction = 3
+ XDP_REDIRECT XdpAction = 4
+)
+
+type BtfInfo struct {
+ Btf Pointer
+ BtfSize uint32
+ Id BTFID
+ Name Pointer
+ NameLen uint32
+ KernelBtf uint32
+}
+
+type FuncInfo struct {
+ InsnOff uint32
+ TypeId uint32
+}
+
+type LineInfo struct {
+ InsnOff uint32
+ FileNameOff uint32
+ LineOff uint32
+ LineCol uint32
+}
+
+type LinkInfo struct {
+ Type LinkType
+ Id LinkID
+ ProgId uint32
+ _ [4]byte
+ Extra [32]uint8
+}
+
+type MapInfo struct {
+ Type uint32
+ Id uint32
+ KeySize uint32
+ ValueSize uint32
+ MaxEntries uint32
+ MapFlags MapFlags
+ Name ObjName
+ Ifindex uint32
+ BtfVmlinuxValueTypeId TypeID
+ NetnsDev uint64
+ NetnsIno uint64
+ BtfId uint32
+ BtfKeyTypeId TypeID
+ BtfValueTypeId TypeID
+ _ [4]byte
+ MapExtra uint64
+}
+
+type ProgInfo struct {
+ Type uint32
+ Id uint32
+ Tag [8]uint8
+ JitedProgLen uint32
+ XlatedProgLen uint32
+ JitedProgInsns uint64
+ XlatedProgInsns Pointer
+ LoadTime uint64
+ CreatedByUid uint32
+ NrMapIds uint32
+ MapIds Pointer
+ Name ObjName
+ Ifindex uint32
+ _ [4]byte /* unsupported bitfield */
+ NetnsDev uint64
+ NetnsIno uint64
+ NrJitedKsyms uint32
+ NrJitedFuncLens uint32
+ JitedKsyms uint64
+ JitedFuncLens uint64
+ BtfId BTFID
+ FuncInfoRecSize uint32
+ FuncInfo Pointer
+ NrFuncInfo uint32
+ NrLineInfo uint32
+ LineInfo Pointer
+ JitedLineInfo uint64
+ NrJitedLineInfo uint32
+ LineInfoRecSize uint32
+ JitedLineInfoRecSize uint32
+ NrProgTags uint32
+ ProgTags uint64
+ RunTimeNs uint64
+ RunCnt uint64
+ RecursionMisses uint64
+ VerifiedInsns uint32
+ AttachBtfObjId BTFID
+ AttachBtfId TypeID
+ _ [4]byte
+}
+
+type SkLookup struct {
+ Cookie uint64
+ Family uint32
+ Protocol uint32
+ RemoteIp4 [4]uint8
+ RemoteIp6 [16]uint8
+ RemotePort uint16
+ _ [2]byte
+ LocalIp4 [4]uint8
+ LocalIp6 [16]uint8
+ LocalPort uint32
+ IngressIfindex uint32
+ _ [4]byte
+}
+
+type XdpMd struct {
+ Data uint32
+ DataEnd uint32
+ DataMeta uint32
+ IngressIfindex uint32
+ RxQueueIndex uint32
+ EgressIfindex uint32
+}
+
+type BtfGetFdByIdAttr struct{ Id uint32 }
+
+func BtfGetFdById(attr *BtfGetFdByIdAttr) (*FD, error) {
+ fd, err := BPF(BPF_BTF_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type BtfGetNextIdAttr struct {
+ Id BTFID
+ NextId BTFID
+}
+
+func BtfGetNextId(attr *BtfGetNextIdAttr) error {
+ _, err := BPF(BPF_BTF_GET_NEXT_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type BtfLoadAttr struct {
+ Btf Pointer
+ BtfLogBuf Pointer
+ BtfSize uint32
+ BtfLogSize uint32
+ BtfLogLevel uint32
+ _ [4]byte
+}
+
+func BtfLoad(attr *BtfLoadAttr) (*FD, error) {
+ fd, err := BPF(BPF_BTF_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type EnableStatsAttr struct{ Type uint32 }
+
+func EnableStats(attr *EnableStatsAttr) (*FD, error) {
+ fd, err := BPF(BPF_ENABLE_STATS, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type IterCreateAttr struct {
+ LinkFd uint32
+ Flags uint32
+}
+
+func IterCreate(attr *IterCreateAttr) (*FD, error) {
+ fd, err := BPF(BPF_ITER_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type LinkCreateAttr struct {
+ ProgFd uint32
+ TargetFd uint32
+ AttachType AttachType
+ Flags uint32
+ TargetBtfId TypeID
+ _ [28]byte
+}
+
+func LinkCreate(attr *LinkCreateAttr) (*FD, error) {
+ fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type LinkCreateIterAttr struct {
+ ProgFd uint32
+ TargetFd uint32
+ AttachType AttachType
+ Flags uint32
+ IterInfo Pointer
+ IterInfoLen uint32
+ _ [20]byte
+}
+
+func LinkCreateIter(attr *LinkCreateIterAttr) (*FD, error) {
+ fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type LinkCreateKprobeMultiAttr struct {
+ ProgFd uint32
+ TargetFd uint32
+ AttachType AttachType
+ Flags uint32
+ KprobeMultiFlags uint32
+ Count uint32
+ Syms Pointer
+ Addrs Pointer
+ Cookies Pointer
+}
+
+func LinkCreateKprobeMulti(attr *LinkCreateKprobeMultiAttr) (*FD, error) {
+ fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type LinkCreatePerfEventAttr struct {
+ ProgFd uint32
+ TargetFd uint32
+ AttachType AttachType
+ Flags uint32
+ BpfCookie uint64
+ _ [24]byte
+}
+
+func LinkCreatePerfEvent(attr *LinkCreatePerfEventAttr) (*FD, error) {
+ fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type LinkCreateTracingAttr struct {
+ ProgFd uint32
+ TargetFd uint32
+ AttachType AttachType
+ Flags uint32
+ TargetBtfId BTFID
+ _ [4]byte
+ Cookie uint64
+ _ [16]byte
+}
+
+func LinkCreateTracing(attr *LinkCreateTracingAttr) (*FD, error) {
+ fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type LinkUpdateAttr struct {
+ LinkFd uint32
+ NewProgFd uint32
+ Flags uint32
+ OldProgFd uint32
+}
+
+func LinkUpdate(attr *LinkUpdateAttr) error {
+ _, err := BPF(BPF_LINK_UPDATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type MapCreateAttr struct {
+ MapType MapType
+ KeySize uint32
+ ValueSize uint32
+ MaxEntries uint32
+ MapFlags MapFlags
+ InnerMapFd uint32
+ NumaNode uint32
+ MapName ObjName
+ MapIfindex uint32
+ BtfFd uint32
+ BtfKeyTypeId TypeID
+ BtfValueTypeId TypeID
+ BtfVmlinuxValueTypeId TypeID
+ MapExtra uint64
+}
+
+func MapCreate(attr *MapCreateAttr) (*FD, error) {
+ fd, err := BPF(BPF_MAP_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type MapDeleteBatchAttr struct {
+ InBatch Pointer
+ OutBatch Pointer
+ Keys Pointer
+ Values Pointer
+ Count uint32
+ MapFd uint32
+ ElemFlags uint64
+ Flags uint64
+}
+
+func MapDeleteBatch(attr *MapDeleteBatchAttr) error {
+ _, err := BPF(BPF_MAP_DELETE_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type MapDeleteElemAttr struct {
+ MapFd uint32
+ _ [4]byte
+ Key Pointer
+ Value Pointer
+ Flags uint64
+}
+
+func MapDeleteElem(attr *MapDeleteElemAttr) error {
+ _, err := BPF(BPF_MAP_DELETE_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type MapFreezeAttr struct{ MapFd uint32 }
+
+func MapFreeze(attr *MapFreezeAttr) error {
+ _, err := BPF(BPF_MAP_FREEZE, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type MapGetFdByIdAttr struct{ Id uint32 }
+
+func MapGetFdById(attr *MapGetFdByIdAttr) (*FD, error) {
+ fd, err := BPF(BPF_MAP_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type MapGetNextIdAttr struct {
+ Id uint32
+ NextId uint32
+}
+
+func MapGetNextId(attr *MapGetNextIdAttr) error {
+ _, err := BPF(BPF_MAP_GET_NEXT_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type MapGetNextKeyAttr struct {
+ MapFd uint32
+ _ [4]byte
+ Key Pointer
+ NextKey Pointer
+}
+
+func MapGetNextKey(attr *MapGetNextKeyAttr) error {
+ _, err := BPF(BPF_MAP_GET_NEXT_KEY, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type MapLookupAndDeleteBatchAttr struct {
+ InBatch Pointer
+ OutBatch Pointer
+ Keys Pointer
+ Values Pointer
+ Count uint32
+ MapFd uint32
+ ElemFlags uint64
+ Flags uint64
+}
+
+func MapLookupAndDeleteBatch(attr *MapLookupAndDeleteBatchAttr) error {
+ _, err := BPF(BPF_MAP_LOOKUP_AND_DELETE_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type MapLookupAndDeleteElemAttr struct {
+ MapFd uint32
+ _ [4]byte
+ Key Pointer
+ Value Pointer
+ Flags uint64
+}
+
+func MapLookupAndDeleteElem(attr *MapLookupAndDeleteElemAttr) error {
+ _, err := BPF(BPF_MAP_LOOKUP_AND_DELETE_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type MapLookupBatchAttr struct {
+ InBatch Pointer
+ OutBatch Pointer
+ Keys Pointer
+ Values Pointer
+ Count uint32
+ MapFd uint32
+ ElemFlags uint64
+ Flags uint64
+}
+
+func MapLookupBatch(attr *MapLookupBatchAttr) error {
+ _, err := BPF(BPF_MAP_LOOKUP_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type MapLookupElemAttr struct {
+ MapFd uint32
+ _ [4]byte
+ Key Pointer
+ Value Pointer
+ Flags uint64
+}
+
+func MapLookupElem(attr *MapLookupElemAttr) error {
+ _, err := BPF(BPF_MAP_LOOKUP_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type MapUpdateBatchAttr struct {
+ InBatch Pointer
+ OutBatch Pointer
+ Keys Pointer
+ Values Pointer
+ Count uint32
+ MapFd uint32
+ ElemFlags uint64
+ Flags uint64
+}
+
+func MapUpdateBatch(attr *MapUpdateBatchAttr) error {
+ _, err := BPF(BPF_MAP_UPDATE_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type MapUpdateElemAttr struct {
+ MapFd uint32
+ _ [4]byte
+ Key Pointer
+ Value Pointer
+ Flags uint64
+}
+
+func MapUpdateElem(attr *MapUpdateElemAttr) error {
+ _, err := BPF(BPF_MAP_UPDATE_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type ObjGetAttr struct {
+ Pathname Pointer
+ BpfFd uint32
+ FileFlags uint32
+}
+
+func ObjGet(attr *ObjGetAttr) (*FD, error) {
+ fd, err := BPF(BPF_OBJ_GET, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type ObjGetInfoByFdAttr struct {
+ BpfFd uint32
+ InfoLen uint32
+ Info Pointer
+}
+
+func ObjGetInfoByFd(attr *ObjGetInfoByFdAttr) error {
+ _, err := BPF(BPF_OBJ_GET_INFO_BY_FD, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type ObjPinAttr struct {
+ Pathname Pointer
+ BpfFd uint32
+ FileFlags uint32
+}
+
+func ObjPin(attr *ObjPinAttr) error {
+ _, err := BPF(BPF_OBJ_PIN, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type ProgAttachAttr struct {
+ TargetFd uint32
+ AttachBpfFd uint32
+ AttachType uint32
+ AttachFlags uint32
+ ReplaceBpfFd uint32
+}
+
+func ProgAttach(attr *ProgAttachAttr) error {
+ _, err := BPF(BPF_PROG_ATTACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type ProgBindMapAttr struct {
+ ProgFd uint32
+ MapFd uint32
+ Flags uint32
+}
+
+func ProgBindMap(attr *ProgBindMapAttr) error {
+ _, err := BPF(BPF_PROG_BIND_MAP, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type ProgDetachAttr struct {
+ TargetFd uint32
+ AttachBpfFd uint32
+ AttachType uint32
+}
+
+func ProgDetach(attr *ProgDetachAttr) error {
+ _, err := BPF(BPF_PROG_DETACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type ProgGetFdByIdAttr struct{ Id uint32 }
+
+func ProgGetFdById(attr *ProgGetFdByIdAttr) (*FD, error) {
+ fd, err := BPF(BPF_PROG_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type ProgGetNextIdAttr struct {
+ Id uint32
+ NextId uint32
+}
+
+func ProgGetNextId(attr *ProgGetNextIdAttr) error {
+ _, err := BPF(BPF_PROG_GET_NEXT_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type ProgLoadAttr struct {
+ ProgType ProgType
+ InsnCnt uint32
+ Insns Pointer
+ License Pointer
+ LogLevel LogLevel
+ LogSize uint32
+ LogBuf Pointer
+ KernVersion uint32
+ ProgFlags uint32
+ ProgName ObjName
+ ProgIfindex uint32
+ ExpectedAttachType AttachType
+ ProgBtfFd uint32
+ FuncInfoRecSize uint32
+ FuncInfo Pointer
+ FuncInfoCnt uint32
+ LineInfoRecSize uint32
+ LineInfo Pointer
+ LineInfoCnt uint32
+ AttachBtfId TypeID
+ AttachBtfObjFd uint32
+ CoreReloCnt uint32
+ FdArray Pointer
+ CoreRelos Pointer
+ CoreReloRecSize uint32
+ _ [4]byte
+}
+
+func ProgLoad(attr *ProgLoadAttr) (*FD, error) {
+ fd, err := BPF(BPF_PROG_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type ProgQueryAttr struct {
+ TargetFd uint32
+ AttachType AttachType
+ QueryFlags uint32
+ AttachFlags uint32
+ ProgIds Pointer
+ ProgCount uint32
+ _ [4]byte
+ ProgAttachFlags uint64
+}
+
+func ProgQuery(attr *ProgQueryAttr) error {
+ _, err := BPF(BPF_PROG_QUERY, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type ProgRunAttr struct {
+ ProgFd uint32
+ Retval uint32
+ DataSizeIn uint32
+ DataSizeOut uint32
+ DataIn Pointer
+ DataOut Pointer
+ Repeat uint32
+ Duration uint32
+ CtxSizeIn uint32
+ CtxSizeOut uint32
+ CtxIn Pointer
+ CtxOut Pointer
+ Flags uint32
+ Cpu uint32
+ BatchSize uint32
+ _ [4]byte
+}
+
+func ProgRun(attr *ProgRunAttr) error {
+ _, err := BPF(BPF_PROG_TEST_RUN, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ return err
+}
+
+type RawTracepointOpenAttr struct {
+ Name Pointer
+ ProgFd uint32
+ _ [4]byte
+}
+
+func RawTracepointOpen(attr *RawTracepointOpenAttr) (*FD, error) {
+ fd, err := BPF(BPF_RAW_TRACEPOINT_OPEN, unsafe.Pointer(attr), unsafe.Sizeof(*attr))
+ if err != nil {
+ return nil, err
+ }
+ return NewFD(int(fd))
+}
+
+type CgroupLinkInfo struct {
+ CgroupId uint64
+ AttachType AttachType
+ _ [4]byte
+}
+
+type IterLinkInfo struct {
+ TargetName Pointer
+ TargetNameLen uint32
+}
+
+type NetNsLinkInfo struct {
+ NetnsIno uint32
+ AttachType AttachType
+}
+
+type RawTracepointLinkInfo struct {
+ TpName Pointer
+ TpNameLen uint32
+ _ [4]byte
+}
+
+type TracingLinkInfo struct {
+ AttachType AttachType
+ TargetObjId uint32
+ TargetBtfId TypeID
+}
+
+type XDPLinkInfo struct{ Ifindex uint32 }
diff --git a/vendor/github.com/cilium/ebpf/internal/sysenc/buffer.go b/vendor/github.com/cilium/ebpf/internal/sysenc/buffer.go
new file mode 100644
index 000000000..c6959d9cc
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sysenc/buffer.go
@@ -0,0 +1,77 @@
+package sysenc
+
+import (
+ "unsafe"
+
+ "github.com/cilium/ebpf/internal/sys"
+)
+
+type Buffer struct {
+ ptr unsafe.Pointer
+ // Size of the buffer. syscallPointerOnly if created from UnsafeBuffer or when using
+ // zero-copy unmarshaling.
+ size int
+}
+
+const syscallPointerOnly = -1
+
+func newBuffer(buf []byte) Buffer {
+ if len(buf) == 0 {
+ return Buffer{}
+ }
+ return Buffer{unsafe.Pointer(&buf[0]), len(buf)}
+}
+
+// UnsafeBuffer constructs a Buffer for zero-copy unmarshaling.
+//
+// [Pointer] is the only valid method to call on such a Buffer.
+// Use [SyscallBuffer] instead if possible.
+func UnsafeBuffer(ptr unsafe.Pointer) Buffer {
+ return Buffer{ptr, syscallPointerOnly}
+}
+
+// SyscallOutput prepares a Buffer for a syscall to write into.
+//
+// The buffer may point at the underlying memory of dst, in which case [Unmarshal]
+// becomes a no-op.
+//
+// The contents of the buffer are undefined and may be non-zero.
+func SyscallOutput(dst any, size int) Buffer {
+ if dstBuf := unsafeBackingMemory(dst); len(dstBuf) == size {
+ buf := newBuffer(dstBuf)
+ buf.size = syscallPointerOnly
+ return buf
+ }
+
+ return newBuffer(make([]byte, size))
+}
+
+// CopyTo copies the buffer into dst.
+//
+// Returns the number of copied bytes.
+func (b Buffer) CopyTo(dst []byte) int {
+ return copy(dst, b.unsafeBytes())
+}
+
+// Pointer returns the location where a syscall should write.
+func (b Buffer) Pointer() sys.Pointer {
+ // NB: This deliberately ignores b.length to support zero-copy
+ // marshaling / unmarshaling using unsafe.Pointer.
+ return sys.NewPointer(b.ptr)
+}
+
+// Unmarshal the buffer into the provided value.
+func (b Buffer) Unmarshal(data any) error {
+ if b.size == syscallPointerOnly {
+ return nil
+ }
+
+ return Unmarshal(data, b.unsafeBytes())
+}
+
+func (b Buffer) unsafeBytes() []byte {
+ if b.size == syscallPointerOnly {
+ return nil
+ }
+ return unsafe.Slice((*byte)(b.ptr), b.size)
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/sysenc/doc.go b/vendor/github.com/cilium/ebpf/internal/sysenc/doc.go
new file mode 100644
index 000000000..676ad98ba
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sysenc/doc.go
@@ -0,0 +1,3 @@
+// Package sysenc provides efficient conversion of Go values to system
+// call interfaces.
+package sysenc
diff --git a/vendor/github.com/cilium/ebpf/internal/sysenc/layout.go b/vendor/github.com/cilium/ebpf/internal/sysenc/layout.go
new file mode 100644
index 000000000..52d111e7a
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sysenc/layout.go
@@ -0,0 +1,41 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found at https://go.dev/LICENSE.
+
+package sysenc
+
+import (
+ "reflect"
+ "sync"
+)
+
+var hasUnexportedFieldsCache sync.Map // map[reflect.Type]bool
+
+func hasUnexportedFields(typ reflect.Type) bool {
+ switch typ.Kind() {
+ case reflect.Slice, reflect.Array, reflect.Pointer:
+ return hasUnexportedFields(typ.Elem())
+
+ case reflect.Struct:
+ if unexported, ok := hasUnexportedFieldsCache.Load(typ); ok {
+ return unexported.(bool)
+ }
+
+ unexported := false
+ for i, n := 0, typ.NumField(); i < n; i++ {
+ field := typ.Field(i)
+ // Package binary allows _ fields but always writes zeroes into them.
+ if (!field.IsExported() && field.Name != "_") || hasUnexportedFields(field.Type) {
+ unexported = true
+ break
+ }
+ }
+
+ hasUnexportedFieldsCache.Store(typ, unexported)
+ return unexported
+
+ default:
+ // NB: It's not clear what this means for Chan and so on.
+ return false
+ }
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/sysenc/marshal.go b/vendor/github.com/cilium/ebpf/internal/sysenc/marshal.go
new file mode 100644
index 000000000..235a1df26
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/sysenc/marshal.go
@@ -0,0 +1,163 @@
+package sysenc
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "reflect"
+ "sync"
+ "unsafe"
+
+ "github.com/cilium/ebpf/internal"
+)
+
+// Marshal turns data into a byte slice using the system's native endianness.
+//
+// If possible, avoids allocations by directly using the backing memory
+// of data. This means that the variable must not be modified for the lifetime
+// of the returned [Buffer].
+//
+// Returns an error if the data can't be turned into a byte slice according to
+// the behaviour of [binary.Write].
+func Marshal(data any, size int) (Buffer, error) {
+ if data == nil {
+ return Buffer{}, errors.New("can't marshal a nil value")
+ }
+
+ var buf []byte
+ var err error
+ switch value := data.(type) {
+ case encoding.BinaryMarshaler:
+ buf, err = value.MarshalBinary()
+ case string:
+ buf = unsafe.Slice(unsafe.StringData(value), len(value))
+ case []byte:
+ buf = value
+ case int16:
+ buf = internal.NativeEndian.AppendUint16(make([]byte, 0, 2), uint16(value))
+ case uint16:
+ buf = internal.NativeEndian.AppendUint16(make([]byte, 0, 2), value)
+ case int32:
+ buf = internal.NativeEndian.AppendUint32(make([]byte, 0, 4), uint32(value))
+ case uint32:
+ buf = internal.NativeEndian.AppendUint32(make([]byte, 0, 4), value)
+ case int64:
+ buf = internal.NativeEndian.AppendUint64(make([]byte, 0, 8), uint64(value))
+ case uint64:
+ buf = internal.NativeEndian.AppendUint64(make([]byte, 0, 8), value)
+ default:
+ if buf := unsafeBackingMemory(data); len(buf) == size {
+ return newBuffer(buf), nil
+ }
+
+ wr := internal.NewBuffer(make([]byte, 0, size))
+ defer internal.PutBuffer(wr)
+
+ err = binary.Write(wr, internal.NativeEndian, value)
+ buf = wr.Bytes()
+ }
+ if err != nil {
+ return Buffer{}, err
+ }
+
+ if len(buf) != size {
+ return Buffer{}, fmt.Errorf("%T doesn't marshal to %d bytes", data, size)
+ }
+
+ return newBuffer(buf), nil
+}
+
+var bytesReaderPool = sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Reader)
+ },
+}
+
+// Unmarshal a byte slice in the system's native endianness into data.
+//
+// Returns an error if buf can't be unmarshalled according to the behaviour
+// of [binary.Read].
+func Unmarshal(data interface{}, buf []byte) error {
+ switch value := data.(type) {
+ case encoding.BinaryUnmarshaler:
+ return value.UnmarshalBinary(buf)
+
+ case *string:
+ *value = string(buf)
+ return nil
+
+ default:
+ if dataBuf := unsafeBackingMemory(data); len(dataBuf) == len(buf) {
+ copy(dataBuf, buf)
+ return nil
+ }
+
+ rd := bytesReaderPool.Get().(*bytes.Reader)
+ defer bytesReaderPool.Put(rd)
+
+ rd.Reset(buf)
+
+ return binary.Read(rd, internal.NativeEndian, value)
+ }
+}
+
+// unsafeBackingMemory returns the backing memory of data if it can be used
+// instead of calling into package binary.
+//
+// Returns nil if the value is not a pointer or a slice, or if it contains
+// padding or unexported fields.
+func unsafeBackingMemory(data any) []byte {
+ if data == nil {
+ return nil
+ }
+
+ value := reflect.ValueOf(data)
+ var valueSize int
+ switch value.Kind() {
+ case reflect.Pointer:
+ if value.IsNil() {
+ return nil
+ }
+
+ if elemType := value.Type().Elem(); elemType.Kind() != reflect.Slice {
+ valueSize = int(elemType.Size())
+ break
+ }
+
+ // We're dealing with a pointer to a slice. Dereference and
+ // handle it like a regular slice.
+ value = value.Elem()
+ fallthrough
+
+ case reflect.Slice:
+ valueSize = int(value.Type().Elem().Size()) * value.Len()
+
+ default:
+ // Prevent Value.UnsafePointer from panicking.
+ return nil
+ }
+
+ // Some nil pointer types currently crash binary.Size. Call it after our own
+ // code so that the panic isn't reachable.
+ // See https://github.com/golang/go/issues/60892
+ if size := binary.Size(data); size == -1 || size != valueSize {
+ // The type contains padding or unsupported types.
+ return nil
+ }
+
+ if hasUnexportedFields(reflect.TypeOf(data)) {
+ return nil
+ }
+
+ // Reinterpret the pointer as a byte slice. This violates the unsafe.Pointer
+ // rules because it's very unlikely that the source data has "an equivalent
+ // memory layout". However, we can make it safe-ish because of the
+ // following reasons:
+ // - There is no alignment mismatch since we cast to a type with an
+ // alignment of 1.
+ // - There are no pointers in the source type so we don't upset the GC.
+ // - The length is verified at runtime.
+ return unsafe.Slice((*byte)(value.UnsafePointer()), valueSize)
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go b/vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go
new file mode 100644
index 000000000..1b45a9a74
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go
@@ -0,0 +1,359 @@
+package tracefs
+
+import (
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "syscall"
+
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+var (
+ ErrInvalidInput = errors.New("invalid input")
+
+ ErrInvalidMaxActive = errors.New("can only set maxactive on kretprobes")
+)
+
+//go:generate go run golang.org/x/tools/cmd/stringer@latest -type=ProbeType -linecomment
+
+type ProbeType uint8
+
+const (
+ Kprobe ProbeType = iota // kprobe
+ Uprobe // uprobe
+)
+
+func (pt ProbeType) eventsFile() (*os.File, error) {
+ path, err := sanitizeTracefsPath(fmt.Sprintf("%s_events", pt.String()))
+ if err != nil {
+ return nil, err
+ }
+
+ return os.OpenFile(path, os.O_APPEND|os.O_WRONLY, 0666)
+}
+
+type ProbeArgs struct {
+ Type ProbeType
+ Symbol, Group, Path string
+ Offset, RefCtrOffset, Cookie uint64
+ Pid, RetprobeMaxActive int
+ Ret bool
+}
+
+// RandomGroup generates a pseudorandom string for use as a tracefs group name.
+// Returns an error when the output string would exceed 63 characters (kernel
+// limitation), when rand.Read() fails or when prefix contains characters not
+// allowed by IsValidTraceID.
+func RandomGroup(prefix string) (string, error) {
+ if !validIdentifier(prefix) {
+ return "", fmt.Errorf("prefix '%s' must be alphanumeric or underscore: %w", prefix, ErrInvalidInput)
+ }
+
+ b := make([]byte, 8)
+ if _, err := rand.Read(b); err != nil {
+ return "", fmt.Errorf("reading random bytes: %w", err)
+ }
+
+ group := fmt.Sprintf("%s_%x", prefix, b)
+ if len(group) > 63 {
+ return "", fmt.Errorf("group name '%s' cannot be longer than 63 characters: %w", group, ErrInvalidInput)
+ }
+
+ return group, nil
+}
+
+// validIdentifier implements the equivalent of a regex match
+// against "^[a-zA-Z_][0-9a-zA-Z_]*$".
+//
+// Trace event groups, names and kernel symbols must adhere to this set
+// of characters. Non-empty, first character must not be a number, all
+// characters must be alphanumeric or underscore.
+func validIdentifier(s string) bool {
+ if len(s) < 1 {
+ return false
+ }
+ for i, c := range []byte(s) {
+ switch {
+ case c >= 'a' && c <= 'z':
+ case c >= 'A' && c <= 'Z':
+ case c == '_':
+ case i > 0 && c >= '0' && c <= '9':
+
+ default:
+ return false
+ }
+ }
+
+ return true
+}
+
+func sanitizeTracefsPath(path ...string) (string, error) {
+ base, err := getTracefsPath()
+ if err != nil {
+ return "", err
+ }
+ l := filepath.Join(path...)
+ p := filepath.Join(base, l)
+ if !strings.HasPrefix(p, base) {
+ return "", fmt.Errorf("path '%s' attempts to escape base path '%s': %w", l, base, ErrInvalidInput)
+ }
+ return p, nil
+}
+
+// getTracefsPath will return a correct path to the tracefs mount point.
+// Since kernel 4.1 tracefs should be mounted by default at /sys/kernel/tracing,
+// but may be also be available at /sys/kernel/debug/tracing if debugfs is mounted.
+// The available tracefs paths will depends on distribution choices.
+var getTracefsPath = internal.Memoize(func() (string, error) {
+ for _, p := range []struct {
+ path string
+ fsType int64
+ }{
+ {"/sys/kernel/tracing", unix.TRACEFS_MAGIC},
+ {"/sys/kernel/debug/tracing", unix.TRACEFS_MAGIC},
+ // RHEL/CentOS
+ {"/sys/kernel/debug/tracing", unix.DEBUGFS_MAGIC},
+ } {
+ if fsType, err := internal.FSType(p.path); err == nil && fsType == p.fsType {
+ return p.path, nil
+ }
+ }
+
+ return "", errors.New("neither debugfs nor tracefs are mounted")
+})
+
+// sanitizeIdentifier replaces every invalid character for the tracefs api with an underscore.
+//
+// It is equivalent to calling regexp.MustCompile("[^a-zA-Z0-9]+").ReplaceAllString("_").
+func sanitizeIdentifier(s string) string {
+ var skip bool
+ return strings.Map(func(c rune) rune {
+ switch {
+ case c >= 'a' && c <= 'z',
+ c >= 'A' && c <= 'Z',
+ c >= '0' && c <= '9':
+ skip = false
+ return c
+
+ case skip:
+ return -1
+
+ default:
+ skip = true
+ return '_'
+ }
+ }, s)
+}
+
+// EventID reads a trace event's ID from tracefs given its group and name.
+// The kernel requires group and name to be alphanumeric or underscore.
+func EventID(group, name string) (uint64, error) {
+ if !validIdentifier(group) {
+ return 0, fmt.Errorf("invalid tracefs group: %q", group)
+ }
+
+ if !validIdentifier(name) {
+ return 0, fmt.Errorf("invalid tracefs name: %q", name)
+ }
+
+ path, err := sanitizeTracefsPath("events", group, name, "id")
+ if err != nil {
+ return 0, err
+ }
+ tid, err := internal.ReadUint64FromFile("%d\n", path)
+ if errors.Is(err, os.ErrNotExist) {
+ return 0, err
+ }
+ if err != nil {
+ return 0, fmt.Errorf("reading trace event ID of %s/%s: %w", group, name, err)
+ }
+
+ return tid, nil
+}
+
+func probePrefix(ret bool, maxActive int) string {
+ if ret {
+ if maxActive > 0 {
+ return fmt.Sprintf("r%d", maxActive)
+ }
+ return "r"
+ }
+ return "p"
+}
+
+// Event represents an entry in a tracefs probe events file.
+type Event struct {
+ typ ProbeType
+ group, name string
+ // event id allocated by the kernel. 0 if the event has already been removed.
+ id uint64
+}
+
+// NewEvent creates a new ephemeral trace event.
+//
+// Returns os.ErrNotExist if symbol is not a valid
+// kernel symbol, or if it is not traceable with kprobes. Returns os.ErrExist
+// if a probe with the same group and symbol already exists. Returns an error if
+// args.RetprobeMaxActive is used on non kprobe types. Returns ErrNotSupported if
+// the kernel is too old to support kretprobe maxactive.
+func NewEvent(args ProbeArgs) (*Event, error) {
+ // Before attempting to create a trace event through tracefs,
+ // check if an event with the same group and name already exists.
+ // Kernels 4.x and earlier don't return os.ErrExist on writing a duplicate
+ // entry, so we need to rely on reads for detecting uniqueness.
+ eventName := sanitizeIdentifier(args.Symbol)
+ _, err := EventID(args.Group, eventName)
+ if err == nil {
+ return nil, fmt.Errorf("trace event %s/%s: %w", args.Group, eventName, os.ErrExist)
+ }
+ if err != nil && !errors.Is(err, os.ErrNotExist) {
+ return nil, fmt.Errorf("checking trace event %s/%s: %w", args.Group, eventName, err)
+ }
+
+ // Open the kprobe_events file in tracefs.
+ f, err := args.Type.eventsFile()
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ var pe, token string
+ switch args.Type {
+ case Kprobe:
+ // The kprobe_events syntax is as follows (see Documentation/trace/kprobetrace.txt):
+ // p[:[GRP/]EVENT] [MOD:]SYM[+offs]|MEMADDR [FETCHARGS] : Set a probe
+ // r[MAXACTIVE][:[GRP/]EVENT] [MOD:]SYM[+0] [FETCHARGS] : Set a return probe
+ // -:[GRP/]EVENT : Clear a probe
+ //
+ // Some examples:
+ // r:ebpf_1234/r_my_kretprobe nf_conntrack_destroy
+ // p:ebpf_5678/p_my_kprobe __x64_sys_execve
+ //
+ // Leaving the kretprobe's MAXACTIVE set to 0 (or absent) will make the
+ // kernel default to NR_CPUS. This is desired in most eBPF cases since
+ // subsampling or rate limiting logic can be more accurately implemented in
+ // the eBPF program itself.
+ // See Documentation/kprobes.txt for more details.
+ if args.RetprobeMaxActive != 0 && !args.Ret {
+ return nil, ErrInvalidMaxActive
+ }
+ token = KprobeToken(args)
+ pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.Ret, args.RetprobeMaxActive), args.Group, eventName, token)
+ case Uprobe:
+ // The uprobe_events syntax is as follows:
+ // p[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a probe
+ // r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a return probe
+ // -:[GRP/]EVENT : Clear a probe
+ //
+ // Some examples:
+ // r:ebpf_1234/readline /bin/bash:0x12345
+ // p:ebpf_5678/main_mySymbol /bin/mybin:0x12345(0x123)
+ //
+ // See Documentation/trace/uprobetracer.txt for more details.
+ if args.RetprobeMaxActive != 0 {
+ return nil, ErrInvalidMaxActive
+ }
+ token = UprobeToken(args)
+ pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.Ret, 0), args.Group, eventName, token)
+ }
+ _, err = f.WriteString(pe)
+
+ // Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL
+ // when trying to create a retprobe for a missing symbol.
+ if errors.Is(err, os.ErrNotExist) {
+ return nil, fmt.Errorf("token %s: not found: %w", token, err)
+ }
+ // Since commit ab105a4fb894, EILSEQ is returned when a kprobe sym+offset is resolved
+ // to an invalid insn boundary. The exact conditions that trigger this error are
+ // arch specific however.
+ if errors.Is(err, syscall.EILSEQ) {
+ return nil, fmt.Errorf("token %s: bad insn boundary: %w", token, os.ErrNotExist)
+ }
+ // ERANGE is returned when the `SYM[+offs]` token is too big and cannot
+ // be resolved.
+ if errors.Is(err, syscall.ERANGE) {
+ return nil, fmt.Errorf("token %s: offset too big: %w", token, os.ErrNotExist)
+ }
+
+ if err != nil {
+ return nil, fmt.Errorf("token %s: writing '%s': %w", token, pe, err)
+ }
+
+ // Get the newly-created trace event's id.
+ tid, err := EventID(args.Group, eventName)
+ if args.RetprobeMaxActive != 0 && errors.Is(err, os.ErrNotExist) {
+ // Kernels < 4.12 don't support maxactive and therefore auto generate
+ // group and event names from the symbol and offset. The symbol is used
+ // without any sanitization.
+ // See https://elixir.bootlin.com/linux/v4.10/source/kernel/trace/trace_kprobe.c#L712
+ event := fmt.Sprintf("kprobes/r_%s_%d", args.Symbol, args.Offset)
+ if err := removeEvent(args.Type, event); err != nil {
+ return nil, fmt.Errorf("failed to remove spurious maxactive event: %s", err)
+ }
+ return nil, fmt.Errorf("create trace event with non-default maxactive: %w", internal.ErrNotSupported)
+ }
+ if err != nil {
+ return nil, fmt.Errorf("get trace event id: %w", err)
+ }
+
+ evt := &Event{args.Type, args.Group, eventName, tid}
+ runtime.SetFinalizer(evt, (*Event).Close)
+ return evt, nil
+}
+
+// Close removes the event from tracefs.
+//
+// Returns os.ErrClosed if the event has already been closed before.
+func (evt *Event) Close() error {
+ if evt.id == 0 {
+ return os.ErrClosed
+ }
+
+ evt.id = 0
+ runtime.SetFinalizer(evt, nil)
+ pe := fmt.Sprintf("%s/%s", evt.group, evt.name)
+ return removeEvent(evt.typ, pe)
+}
+
+func removeEvent(typ ProbeType, pe string) error {
+ f, err := typ.eventsFile()
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ // See [k,u]probe_events syntax above. The probe type does not need to be specified
+ // for removals.
+ if _, err = f.WriteString("-:" + pe); err != nil {
+ return fmt.Errorf("remove event %q from %s: %w", pe, f.Name(), err)
+ }
+
+ return nil
+}
+
+// ID returns the tracefs ID associated with the event.
+func (evt *Event) ID() uint64 {
+ return evt.id
+}
+
+// Group returns the tracefs group used by the event.
+func (evt *Event) Group() string {
+ return evt.group
+}
+
+// KprobeToken creates the SYM[+offs] token for the tracefs api.
+func KprobeToken(args ProbeArgs) string {
+ po := args.Symbol
+
+ if args.Offset != 0 {
+ po += fmt.Sprintf("+%#x", args.Offset)
+ }
+
+ return po
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/tracefs/probetype_string.go b/vendor/github.com/cilium/ebpf/internal/tracefs/probetype_string.go
new file mode 100644
index 000000000..87cb0a059
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/tracefs/probetype_string.go
@@ -0,0 +1,24 @@
+// Code generated by "stringer -type=ProbeType -linecomment"; DO NOT EDIT.
+
+package tracefs
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[Kprobe-0]
+ _ = x[Uprobe-1]
+}
+
+const _ProbeType_name = "kprobeuprobe"
+
+var _ProbeType_index = [...]uint8{0, 6, 12}
+
+func (i ProbeType) String() string {
+ if i >= ProbeType(len(_ProbeType_index)-1) {
+ return "ProbeType(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _ProbeType_name[_ProbeType_index[i]:_ProbeType_index[i+1]]
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/tracefs/uprobe.go b/vendor/github.com/cilium/ebpf/internal/tracefs/uprobe.go
new file mode 100644
index 000000000..994f31260
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/tracefs/uprobe.go
@@ -0,0 +1,16 @@
+package tracefs
+
+import "fmt"
+
+// UprobeToken creates the PATH:OFFSET(REF_CTR_OFFSET) token for the tracefs api.
+func UprobeToken(args ProbeArgs) string {
+ po := fmt.Sprintf("%s:%#x", args.Path, args.Offset)
+
+ if args.RefCtrOffset != 0 {
+ // This is not documented in Documentation/trace/uprobetracer.txt.
+ // elixir.bootlin.com/linux/v5.15-rc7/source/kernel/trace/trace.c#L5564
+ po += fmt.Sprintf("(%#x)", args.RefCtrOffset)
+ }
+
+ return po
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/unix/doc.go b/vendor/github.com/cilium/ebpf/internal/unix/doc.go
new file mode 100644
index 000000000..d168d36f1
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/unix/doc.go
@@ -0,0 +1,11 @@
+// Package unix re-exports Linux specific parts of golang.org/x/sys/unix.
+//
+// It avoids breaking compilation on other OS by providing stubs as follows:
+// - Invoking a function always returns an error.
+// - Errnos have distinct, non-zero values.
+// - Constants have distinct but meaningless values.
+// - Types use the same names for members, but may or may not follow the
+// Linux layout.
+package unix
+
+// Note: please don't add any custom API to this package. Use internal/sys instead.
diff --git a/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go b/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go
new file mode 100644
index 000000000..51ed7d059
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go
@@ -0,0 +1,204 @@
+//go:build linux
+
+package unix
+
+import (
+ "syscall"
+
+ linux "golang.org/x/sys/unix"
+)
+
+const (
+ ENOENT = linux.ENOENT
+ EEXIST = linux.EEXIST
+ EAGAIN = linux.EAGAIN
+ ENOSPC = linux.ENOSPC
+ EINVAL = linux.EINVAL
+ EPOLLIN = linux.EPOLLIN
+ EINTR = linux.EINTR
+ EPERM = linux.EPERM
+ ESRCH = linux.ESRCH
+ ENODEV = linux.ENODEV
+ EBADF = linux.EBADF
+ E2BIG = linux.E2BIG
+ EFAULT = linux.EFAULT
+ EACCES = linux.EACCES
+ EILSEQ = linux.EILSEQ
+ EOPNOTSUPP = linux.EOPNOTSUPP
+)
+
+const (
+ BPF_F_NO_PREALLOC = linux.BPF_F_NO_PREALLOC
+ BPF_F_NUMA_NODE = linux.BPF_F_NUMA_NODE
+ BPF_F_RDONLY = linux.BPF_F_RDONLY
+ BPF_F_WRONLY = linux.BPF_F_WRONLY
+ BPF_F_RDONLY_PROG = linux.BPF_F_RDONLY_PROG
+ BPF_F_WRONLY_PROG = linux.BPF_F_WRONLY_PROG
+ BPF_F_SLEEPABLE = linux.BPF_F_SLEEPABLE
+ BPF_F_XDP_HAS_FRAGS = linux.BPF_F_XDP_HAS_FRAGS
+ BPF_F_MMAPABLE = linux.BPF_F_MMAPABLE
+ BPF_F_INNER_MAP = linux.BPF_F_INNER_MAP
+ BPF_F_KPROBE_MULTI_RETURN = linux.BPF_F_KPROBE_MULTI_RETURN
+ BPF_OBJ_NAME_LEN = linux.BPF_OBJ_NAME_LEN
+ BPF_TAG_SIZE = linux.BPF_TAG_SIZE
+ BPF_RINGBUF_BUSY_BIT = linux.BPF_RINGBUF_BUSY_BIT
+ BPF_RINGBUF_DISCARD_BIT = linux.BPF_RINGBUF_DISCARD_BIT
+ BPF_RINGBUF_HDR_SZ = linux.BPF_RINGBUF_HDR_SZ
+ SYS_BPF = linux.SYS_BPF
+ F_DUPFD_CLOEXEC = linux.F_DUPFD_CLOEXEC
+ EPOLL_CTL_ADD = linux.EPOLL_CTL_ADD
+ EPOLL_CLOEXEC = linux.EPOLL_CLOEXEC
+ O_CLOEXEC = linux.O_CLOEXEC
+ O_NONBLOCK = linux.O_NONBLOCK
+ PROT_NONE = linux.PROT_NONE
+ PROT_READ = linux.PROT_READ
+ PROT_WRITE = linux.PROT_WRITE
+ MAP_ANON = linux.MAP_ANON
+ MAP_SHARED = linux.MAP_SHARED
+ MAP_PRIVATE = linux.MAP_PRIVATE
+ PERF_ATTR_SIZE_VER1 = linux.PERF_ATTR_SIZE_VER1
+ PERF_TYPE_SOFTWARE = linux.PERF_TYPE_SOFTWARE
+ PERF_TYPE_TRACEPOINT = linux.PERF_TYPE_TRACEPOINT
+ PERF_COUNT_SW_BPF_OUTPUT = linux.PERF_COUNT_SW_BPF_OUTPUT
+ PERF_EVENT_IOC_DISABLE = linux.PERF_EVENT_IOC_DISABLE
+ PERF_EVENT_IOC_ENABLE = linux.PERF_EVENT_IOC_ENABLE
+ PERF_EVENT_IOC_SET_BPF = linux.PERF_EVENT_IOC_SET_BPF
+ PerfBitWatermark = linux.PerfBitWatermark
+ PerfBitWriteBackward = linux.PerfBitWriteBackward
+ PERF_SAMPLE_RAW = linux.PERF_SAMPLE_RAW
+ PERF_FLAG_FD_CLOEXEC = linux.PERF_FLAG_FD_CLOEXEC
+ RLIM_INFINITY = linux.RLIM_INFINITY
+ RLIMIT_MEMLOCK = linux.RLIMIT_MEMLOCK
+ BPF_STATS_RUN_TIME = linux.BPF_STATS_RUN_TIME
+ PERF_RECORD_LOST = linux.PERF_RECORD_LOST
+ PERF_RECORD_SAMPLE = linux.PERF_RECORD_SAMPLE
+ AT_FDCWD = linux.AT_FDCWD
+ RENAME_NOREPLACE = linux.RENAME_NOREPLACE
+ SO_ATTACH_BPF = linux.SO_ATTACH_BPF
+ SO_DETACH_BPF = linux.SO_DETACH_BPF
+ SOL_SOCKET = linux.SOL_SOCKET
+ SIGPROF = linux.SIGPROF
+ SIG_BLOCK = linux.SIG_BLOCK
+ SIG_UNBLOCK = linux.SIG_UNBLOCK
+ EM_NONE = linux.EM_NONE
+ EM_BPF = linux.EM_BPF
+ BPF_FS_MAGIC = linux.BPF_FS_MAGIC
+ TRACEFS_MAGIC = linux.TRACEFS_MAGIC
+ DEBUGFS_MAGIC = linux.DEBUGFS_MAGIC
+ BPF_RB_NO_WAKEUP = linux.BPF_RB_NO_WAKEUP
+ BPF_RB_FORCE_WAKEUP = linux.BPF_RB_FORCE_WAKEUP
+)
+
+type Statfs_t = linux.Statfs_t
+type Stat_t = linux.Stat_t
+type Rlimit = linux.Rlimit
+type Signal = linux.Signal
+type Sigset_t = linux.Sigset_t
+type PerfEventMmapPage = linux.PerfEventMmapPage
+type EpollEvent = linux.EpollEvent
+type PerfEventAttr = linux.PerfEventAttr
+type Utsname = linux.Utsname
+
+func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+ return linux.Syscall(trap, a1, a2, a3)
+}
+
+func PthreadSigmask(how int, set, oldset *Sigset_t) error {
+ return linux.PthreadSigmask(how, set, oldset)
+}
+
+func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
+ return linux.FcntlInt(fd, cmd, arg)
+}
+
+func IoctlSetInt(fd int, req uint, value int) error {
+ return linux.IoctlSetInt(fd, req, value)
+}
+
+func Statfs(path string, buf *Statfs_t) (err error) {
+ return linux.Statfs(path, buf)
+}
+
+func Close(fd int) (err error) {
+ return linux.Close(fd)
+}
+
+func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
+ return linux.EpollWait(epfd, events, msec)
+}
+
+func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) {
+ return linux.EpollCtl(epfd, op, fd, event)
+}
+
+func Eventfd(initval uint, flags int) (fd int, err error) {
+ return linux.Eventfd(initval, flags)
+}
+
+func Write(fd int, p []byte) (n int, err error) {
+ return linux.Write(fd, p)
+}
+
+func EpollCreate1(flag int) (fd int, err error) {
+ return linux.EpollCreate1(flag)
+}
+
+func SetNonblock(fd int, nonblocking bool) (err error) {
+ return linux.SetNonblock(fd, nonblocking)
+}
+
+func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
+ return linux.Mmap(fd, offset, length, prot, flags)
+}
+
+func Munmap(b []byte) (err error) {
+ return linux.Munmap(b)
+}
+
+func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) {
+ return linux.PerfEventOpen(attr, pid, cpu, groupFd, flags)
+}
+
+func Uname(buf *Utsname) (err error) {
+ return linux.Uname(buf)
+}
+
+func Getpid() int {
+ return linux.Getpid()
+}
+
+func Gettid() int {
+ return linux.Gettid()
+}
+
+func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) {
+ return linux.Tgkill(tgid, tid, sig)
+}
+
+func BytePtrFromString(s string) (*byte, error) {
+ return linux.BytePtrFromString(s)
+}
+
+func ByteSliceToString(s []byte) string {
+ return linux.ByteSliceToString(s)
+}
+
+func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) error {
+ return linux.Renameat2(olddirfd, oldpath, newdirfd, newpath, flags)
+}
+
+func Prlimit(pid, resource int, new, old *Rlimit) error {
+ return linux.Prlimit(pid, resource, new, old)
+}
+
+func Open(path string, mode int, perm uint32) (int, error) {
+ return linux.Open(path, mode, perm)
+}
+
+func Fstat(fd int, stat *Stat_t) error {
+ return linux.Fstat(fd, stat)
+}
+
+func SetsockoptInt(fd, level, opt, value int) error {
+ return linux.SetsockoptInt(fd, level, opt, value)
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/unix/types_other.go b/vendor/github.com/cilium/ebpf/internal/unix/types_other.go
new file mode 100644
index 000000000..1760e9e79
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/unix/types_other.go
@@ -0,0 +1,296 @@
+//go:build !linux
+
+package unix
+
+import (
+ "fmt"
+ "runtime"
+ "syscall"
+)
+
+var errNonLinux = fmt.Errorf("unsupported platform %s/%s", runtime.GOOS, runtime.GOARCH)
+
+// Errnos are distinct and non-zero.
+const (
+ ENOENT syscall.Errno = iota + 1
+ EEXIST
+ EAGAIN
+ ENOSPC
+ EINVAL
+ EINTR
+ EPERM
+ ESRCH
+ ENODEV
+ EBADF
+ E2BIG
+ EFAULT
+ EACCES
+ EILSEQ
+ EOPNOTSUPP
+)
+
+// Constants are distinct to avoid breaking switch statements.
+const (
+ BPF_F_NO_PREALLOC = iota
+ BPF_F_NUMA_NODE
+ BPF_F_RDONLY
+ BPF_F_WRONLY
+ BPF_F_RDONLY_PROG
+ BPF_F_WRONLY_PROG
+ BPF_F_SLEEPABLE
+ BPF_F_MMAPABLE
+ BPF_F_INNER_MAP
+ BPF_F_KPROBE_MULTI_RETURN
+ BPF_F_XDP_HAS_FRAGS
+ BPF_OBJ_NAME_LEN
+ BPF_TAG_SIZE
+ BPF_RINGBUF_BUSY_BIT
+ BPF_RINGBUF_DISCARD_BIT
+ BPF_RINGBUF_HDR_SZ
+ SYS_BPF
+ F_DUPFD_CLOEXEC
+ EPOLLIN
+ EPOLL_CTL_ADD
+ EPOLL_CLOEXEC
+ O_CLOEXEC
+ O_NONBLOCK
+ PROT_NONE
+ PROT_READ
+ PROT_WRITE
+ MAP_ANON
+ MAP_SHARED
+ MAP_PRIVATE
+ PERF_ATTR_SIZE_VER1
+ PERF_TYPE_SOFTWARE
+ PERF_TYPE_TRACEPOINT
+ PERF_COUNT_SW_BPF_OUTPUT
+ PERF_EVENT_IOC_DISABLE
+ PERF_EVENT_IOC_ENABLE
+ PERF_EVENT_IOC_SET_BPF
+ PerfBitWatermark
+ PerfBitWriteBackward
+ PERF_SAMPLE_RAW
+ PERF_FLAG_FD_CLOEXEC
+ RLIM_INFINITY
+ RLIMIT_MEMLOCK
+ BPF_STATS_RUN_TIME
+ PERF_RECORD_LOST
+ PERF_RECORD_SAMPLE
+ AT_FDCWD
+ RENAME_NOREPLACE
+ SO_ATTACH_BPF
+ SO_DETACH_BPF
+ SOL_SOCKET
+ SIGPROF
+ SIG_BLOCK
+ SIG_UNBLOCK
+ EM_NONE
+ EM_BPF
+ BPF_FS_MAGIC
+ TRACEFS_MAGIC
+ DEBUGFS_MAGIC
+ BPF_RB_NO_WAKEUP
+ BPF_RB_FORCE_WAKEUP
+)
+
+type Statfs_t struct {
+ Type int64
+ Bsize int64
+ Blocks uint64
+ Bfree uint64
+ Bavail uint64
+ Files uint64
+ Ffree uint64
+ Fsid [2]int32
+ Namelen int64
+ Frsize int64
+ Flags int64
+ Spare [4]int64
+}
+
+type Stat_t struct {
+ Dev uint64
+ Ino uint64
+ Nlink uint64
+ Mode uint32
+ Uid uint32
+ Gid uint32
+ _ int32
+ Rdev uint64
+ Size int64
+ Blksize int64
+ Blocks int64
+}
+
+type Rlimit struct {
+ Cur uint64
+ Max uint64
+}
+
+type Signal int
+
+type Sigset_t struct {
+ Val [4]uint64
+}
+
+func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
+ return 0, 0, syscall.ENOTSUP
+}
+
+func PthreadSigmask(how int, set, oldset *Sigset_t) error {
+ return errNonLinux
+}
+
+func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
+ return -1, errNonLinux
+}
+
+func IoctlSetInt(fd int, req uint, value int) error {
+ return errNonLinux
+}
+
+func Statfs(path string, buf *Statfs_t) error {
+ return errNonLinux
+}
+
+func Close(fd int) (err error) {
+ return errNonLinux
+}
+
+type EpollEvent struct {
+ Events uint32
+ Fd int32
+ Pad int32
+}
+
+func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
+ return 0, errNonLinux
+}
+
+func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) {
+ return errNonLinux
+}
+
+func Eventfd(initval uint, flags int) (fd int, err error) {
+ return 0, errNonLinux
+}
+
+func Write(fd int, p []byte) (n int, err error) {
+ return 0, errNonLinux
+}
+
+func EpollCreate1(flag int) (fd int, err error) {
+ return 0, errNonLinux
+}
+
+type PerfEventMmapPage struct {
+ Version uint32
+ Compat_version uint32
+ Lock uint32
+ Index uint32
+ Offset int64
+ Time_enabled uint64
+ Time_running uint64
+ Capabilities uint64
+ Pmc_width uint16
+ Time_shift uint16
+ Time_mult uint32
+ Time_offset uint64
+ Time_zero uint64
+ Size uint32
+
+ Data_head uint64
+ Data_tail uint64
+ Data_offset uint64
+ Data_size uint64
+ Aux_head uint64
+ Aux_tail uint64
+ Aux_offset uint64
+ Aux_size uint64
+}
+
+func SetNonblock(fd int, nonblocking bool) (err error) {
+ return errNonLinux
+}
+
+func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
+ return []byte{}, errNonLinux
+}
+
+func Munmap(b []byte) (err error) {
+ return errNonLinux
+}
+
+type PerfEventAttr struct {
+ Type uint32
+ Size uint32
+ Config uint64
+ Sample uint64
+ Sample_type uint64
+ Read_format uint64
+ Bits uint64
+ Wakeup uint32
+ Bp_type uint32
+ Ext1 uint64
+ Ext2 uint64
+ Branch_sample_type uint64
+ Sample_regs_user uint64
+ Sample_stack_user uint32
+ Clockid int32
+ Sample_regs_intr uint64
+ Aux_watermark uint32
+ Sample_max_stack uint16
+}
+
+func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) {
+ return 0, errNonLinux
+}
+
+type Utsname struct {
+ Release [65]byte
+ Version [65]byte
+}
+
+func Uname(buf *Utsname) (err error) {
+ return errNonLinux
+}
+
+func Getpid() int {
+ return -1
+}
+
+func Gettid() int {
+ return -1
+}
+
+func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) {
+ return errNonLinux
+}
+
+func BytePtrFromString(s string) (*byte, error) {
+ return nil, errNonLinux
+}
+
+func ByteSliceToString(s []byte) string {
+ return ""
+}
+
+func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) error {
+ return errNonLinux
+}
+
+func Prlimit(pid, resource int, new, old *Rlimit) error {
+ return errNonLinux
+}
+
+func Open(path string, mode int, perm uint32) (int, error) {
+ return -1, errNonLinux
+}
+
+func Fstat(fd int, stat *Stat_t) error {
+ return errNonLinux
+}
+
+func SetsockoptInt(fd, level, opt, value int) error {
+ return errNonLinux
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/vdso.go b/vendor/github.com/cilium/ebpf/internal/vdso.go
new file mode 100644
index 000000000..10e639bf0
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/vdso.go
@@ -0,0 +1,153 @@
+package internal
+
+import (
+ "debug/elf"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "os"
+
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+var (
+ errAuxvNoVDSO = errors.New("no vdso address found in auxv")
+)
+
+// vdsoVersion returns the LINUX_VERSION_CODE embedded in the vDSO library
+// linked into the current process image.
+func vdsoVersion() (uint32, error) {
+ // Read data from the auxiliary vector, which is normally passed directly
+ // to the process. Go does not expose that data, so we must read it from procfs.
+ // https://man7.org/linux/man-pages/man3/getauxval.3.html
+ av, err := os.Open("/proc/self/auxv")
+ if errors.Is(err, unix.EACCES) {
+ return 0, fmt.Errorf("opening auxv: %w (process may not be dumpable due to file capabilities)", err)
+ }
+ if err != nil {
+ return 0, fmt.Errorf("opening auxv: %w", err)
+ }
+ defer av.Close()
+
+ vdsoAddr, err := vdsoMemoryAddress(av)
+ if err != nil {
+ return 0, fmt.Errorf("finding vDSO memory address: %w", err)
+ }
+
+ // Use /proc/self/mem rather than unsafe.Pointer tricks.
+ mem, err := os.Open("/proc/self/mem")
+ if err != nil {
+ return 0, fmt.Errorf("opening mem: %w", err)
+ }
+ defer mem.Close()
+
+ // Open ELF at provided memory address, as offset into /proc/self/mem.
+ c, err := vdsoLinuxVersionCode(io.NewSectionReader(mem, int64(vdsoAddr), math.MaxInt64))
+ if err != nil {
+ return 0, fmt.Errorf("reading linux version code: %w", err)
+ }
+
+ return c, nil
+}
+
+// vdsoMemoryAddress returns the memory address of the vDSO library
+// linked into the current process image. r is an io.Reader into an auxv blob.
+func vdsoMemoryAddress(r io.Reader) (uint64, error) {
+ const (
+ _AT_NULL = 0 // End of vector
+ _AT_SYSINFO_EHDR = 33 // Offset to vDSO blob in process image
+ )
+
+ // Loop through all tag/value pairs in auxv until we find `AT_SYSINFO_EHDR`,
+ // the address of a page containing the virtual Dynamic Shared Object (vDSO).
+ aux := struct{ Tag, Val uint64 }{}
+ for {
+ if err := binary.Read(r, NativeEndian, &aux); err != nil {
+ return 0, fmt.Errorf("reading auxv entry: %w", err)
+ }
+
+ switch aux.Tag {
+ case _AT_SYSINFO_EHDR:
+ if aux.Val != 0 {
+ return aux.Val, nil
+ }
+ return 0, fmt.Errorf("invalid vDSO address in auxv")
+ // _AT_NULL is always the last tag/val pair in the aux vector
+ // and can be treated like EOF.
+ case _AT_NULL:
+ return 0, errAuxvNoVDSO
+ }
+ }
+}
+
+// format described at https://www.man7.org/linux/man-pages/man5/elf.5.html in section 'Notes (Nhdr)'
+type elfNoteHeader struct {
+ NameSize int32
+ DescSize int32
+ Type int32
+}
+
+// vdsoLinuxVersionCode returns the LINUX_VERSION_CODE embedded in
+// the ELF notes section of the binary provided by the reader.
+func vdsoLinuxVersionCode(r io.ReaderAt) (uint32, error) {
+ hdr, err := NewSafeELFFile(r)
+ if err != nil {
+ return 0, fmt.Errorf("reading vDSO ELF: %w", err)
+ }
+
+ sections := hdr.SectionsByType(elf.SHT_NOTE)
+ if len(sections) == 0 {
+ return 0, fmt.Errorf("no note section found in vDSO ELF")
+ }
+
+ for _, sec := range sections {
+ sr := sec.Open()
+ var n elfNoteHeader
+
+ // Read notes until we find one named 'Linux'.
+ for {
+ if err := binary.Read(sr, hdr.ByteOrder, &n); err != nil {
+ if errors.Is(err, io.EOF) {
+ // We looked at all the notes in this section
+ break
+ }
+ return 0, fmt.Errorf("reading note header: %w", err)
+ }
+
+ // If a note name is defined, it follows the note header.
+ var name string
+ if n.NameSize > 0 {
+ // Read the note name, aligned to 4 bytes.
+ buf := make([]byte, Align(n.NameSize, 4))
+ if err := binary.Read(sr, hdr.ByteOrder, &buf); err != nil {
+ return 0, fmt.Errorf("reading note name: %w", err)
+ }
+
+ // Read nul-terminated string.
+ name = unix.ByteSliceToString(buf[:n.NameSize])
+ }
+
+ // If a note descriptor is defined, it follows the name.
+ // It is possible for a note to have a descriptor but not a name.
+ if n.DescSize > 0 {
+ // LINUX_VERSION_CODE is a uint32 value.
+ if name == "Linux" && n.DescSize == 4 && n.Type == 0 {
+ var version uint32
+ if err := binary.Read(sr, hdr.ByteOrder, &version); err != nil {
+ return 0, fmt.Errorf("reading note descriptor: %w", err)
+ }
+ return version, nil
+ }
+
+ // Discard the note descriptor if it exists but we're not interested in it.
+ if _, err := io.CopyN(io.Discard, sr, int64(Align(n.DescSize, 4))); err != nil {
+ return 0, err
+ }
+ }
+ }
+ }
+
+ return 0, fmt.Errorf("no Linux note in ELF")
+}
diff --git a/vendor/github.com/cilium/ebpf/internal/version.go b/vendor/github.com/cilium/ebpf/internal/version.go
new file mode 100644
index 000000000..9b17ffb44
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/internal/version.go
@@ -0,0 +1,106 @@
+package internal
+
+import (
+ "fmt"
+
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+const (
+ // Version constant used in ELF binaries indicating that the loader needs to
+ // substitute the eBPF program's version with the value of the kernel's
+ // KERNEL_VERSION compile-time macro. Used for compatibility with BCC, gobpf
+ // and RedSift.
+ MagicKernelVersion = 0xFFFFFFFE
+)
+
+// A Version in the form Major.Minor.Patch.
+type Version [3]uint16
+
+// NewVersion creates a version from a string like "Major.Minor.Patch".
+//
+// Patch is optional.
+func NewVersion(ver string) (Version, error) {
+ var major, minor, patch uint16
+ n, _ := fmt.Sscanf(ver, "%d.%d.%d", &major, &minor, &patch)
+ if n < 2 {
+ return Version{}, fmt.Errorf("invalid version: %s", ver)
+ }
+ return Version{major, minor, patch}, nil
+}
+
+// NewVersionFromCode creates a version from a LINUX_VERSION_CODE.
+func NewVersionFromCode(code uint32) Version {
+ return Version{
+ uint16(uint8(code >> 16)),
+ uint16(uint8(code >> 8)),
+ uint16(uint8(code)),
+ }
+}
+
+func (v Version) String() string {
+ if v[2] == 0 {
+ return fmt.Sprintf("v%d.%d", v[0], v[1])
+ }
+ return fmt.Sprintf("v%d.%d.%d", v[0], v[1], v[2])
+}
+
+// Less returns true if the version is less than another version.
+func (v Version) Less(other Version) bool {
+ for i, a := range v {
+ if a == other[i] {
+ continue
+ }
+ return a < other[i]
+ }
+ return false
+}
+
+// Unspecified returns true if the version is all zero.
+func (v Version) Unspecified() bool {
+ return v[0] == 0 && v[1] == 0 && v[2] == 0
+}
+
+// Kernel implements the kernel's KERNEL_VERSION macro from linux/version.h.
+// It represents the kernel version and patch level as a single value.
+func (v Version) Kernel() uint32 {
+
+ // Kernels 4.4 and 4.9 have their SUBLEVEL clamped to 255 to avoid
+ // overflowing into PATCHLEVEL.
+ // See kernel commit 9b82f13e7ef3 ("kbuild: clamp SUBLEVEL to 255").
+ s := v[2]
+ if s > 255 {
+ s = 255
+ }
+
+ // Truncate members to uint8 to prevent them from spilling over into
+ // each other when overflowing 8 bits.
+ return uint32(uint8(v[0]))<<16 | uint32(uint8(v[1]))<<8 | uint32(uint8(s))
+}
+
+// KernelVersion returns the version of the currently running kernel.
+var KernelVersion = Memoize(func() (Version, error) {
+ return detectKernelVersion()
+})
+
+// detectKernelVersion returns the version of the running kernel.
+func detectKernelVersion() (Version, error) {
+ vc, err := vdsoVersion()
+ if err != nil {
+ return Version{}, err
+ }
+ return NewVersionFromCode(vc), nil
+}
+
+// KernelRelease returns the release string of the running kernel.
+// Its format depends on the Linux distribution and corresponds to directory
+// names in /lib/modules by convention. Some examples are 5.15.17-1-lts and
+// 4.19.0-16-amd64.
+func KernelRelease() (string, error) {
+ var uname unix.Utsname
+ if err := unix.Uname(&uname); err != nil {
+ return "", fmt.Errorf("uname failed: %w", err)
+ }
+
+ return unix.ByteSliceToString(uname.Release[:]), nil
+}
diff --git a/vendor/github.com/cilium/ebpf/link/cgroup.go b/vendor/github.com/cilium/ebpf/link/cgroup.go
new file mode 100644
index 000000000..58e85fe9d
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/cgroup.go
@@ -0,0 +1,190 @@
+package link
+
+import (
+ "errors"
+ "fmt"
+ "os"
+
+ "github.com/cilium/ebpf"
+)
+
+type cgroupAttachFlags uint32
+
+const (
+ // Allow programs attached to sub-cgroups to override the verdict of this
+ // program.
+ flagAllowOverride cgroupAttachFlags = 1 << iota
+ // Allow attaching multiple programs to the cgroup. Only works if the cgroup
+ // has zero or more programs attached using the Multi flag. Implies override.
+ flagAllowMulti
+ // Set automatically by progAttachCgroup.Update(). Used for updating a
+ // specific given program attached in multi-mode.
+ flagReplace
+)
+
+type CgroupOptions struct {
+ // Path to a cgroupv2 folder.
+ Path string
+ // One of the AttachCgroup* constants
+ Attach ebpf.AttachType
+ // Program must be of type CGroup*, and the attach type must match Attach.
+ Program *ebpf.Program
+}
+
+// AttachCgroup links a BPF program to a cgroup.
+//
+// If the running kernel doesn't support bpf_link, attempts to emulate its
+// semantics using the legacy PROG_ATTACH mechanism. If bpf_link is not
+// available, the returned [Link] will not support pinning to bpffs.
+//
+// If you need more control over attachment flags or the attachment mechanism
+// used, look at [RawAttachProgram] and [AttachRawLink] instead.
+func AttachCgroup(opts CgroupOptions) (cg Link, err error) {
+ cgroup, err := os.Open(opts.Path)
+ if err != nil {
+ return nil, fmt.Errorf("can't open cgroup: %s", err)
+ }
+ defer func() {
+ if _, ok := cg.(*progAttachCgroup); ok {
+ // Skip closing the cgroup handle if we return a valid progAttachCgroup,
+ // where the handle is retained to implement Update().
+ return
+ }
+ cgroup.Close()
+ }()
+
+ cg, err = newLinkCgroup(cgroup, opts.Attach, opts.Program)
+ if err == nil {
+ return cg, nil
+ }
+
+ if errors.Is(err, ErrNotSupported) {
+ cg, err = newProgAttachCgroup(cgroup, opts.Attach, opts.Program, flagAllowMulti)
+ }
+ if errors.Is(err, ErrNotSupported) {
+ cg, err = newProgAttachCgroup(cgroup, opts.Attach, opts.Program, flagAllowOverride)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ return cg, nil
+}
+
+type progAttachCgroup struct {
+ cgroup *os.File
+ current *ebpf.Program
+ attachType ebpf.AttachType
+ flags cgroupAttachFlags
+}
+
+var _ Link = (*progAttachCgroup)(nil)
+
+func (cg *progAttachCgroup) isLink() {}
+
+// newProgAttachCgroup attaches prog to cgroup using BPF_PROG_ATTACH.
+// cgroup and prog are retained by [progAttachCgroup].
+func newProgAttachCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Program, flags cgroupAttachFlags) (*progAttachCgroup, error) {
+ if flags&flagAllowMulti > 0 {
+ if err := haveProgAttachReplace(); err != nil {
+ return nil, fmt.Errorf("can't support multiple programs: %w", err)
+ }
+ }
+
+ // Use a program handle that cannot be closed by the caller.
+ clone, err := prog.Clone()
+ if err != nil {
+ return nil, err
+ }
+
+ err = RawAttachProgram(RawAttachProgramOptions{
+ Target: int(cgroup.Fd()),
+ Program: clone,
+ Flags: uint32(flags),
+ Attach: attach,
+ })
+ if err != nil {
+ clone.Close()
+ return nil, fmt.Errorf("cgroup: %w", err)
+ }
+
+ return &progAttachCgroup{cgroup, clone, attach, flags}, nil
+}
+
+func (cg *progAttachCgroup) Close() error {
+ defer cg.cgroup.Close()
+ defer cg.current.Close()
+
+ err := RawDetachProgram(RawDetachProgramOptions{
+ Target: int(cg.cgroup.Fd()),
+ Program: cg.current,
+ Attach: cg.attachType,
+ })
+ if err != nil {
+ return fmt.Errorf("close cgroup: %s", err)
+ }
+ return nil
+}
+
+func (cg *progAttachCgroup) Update(prog *ebpf.Program) error {
+ new, err := prog.Clone()
+ if err != nil {
+ return err
+ }
+
+ args := RawAttachProgramOptions{
+ Target: int(cg.cgroup.Fd()),
+ Program: prog,
+ Attach: cg.attachType,
+ Flags: uint32(cg.flags),
+ }
+
+ if cg.flags&flagAllowMulti > 0 {
+ // Atomically replacing multiple programs requires at least
+ // 5.5 (commit 7dd68b3279f17921 "bpf: Support replacing cgroup-bpf
+ // program in MULTI mode")
+ args.Flags |= uint32(flagReplace)
+ args.Replace = cg.current
+ }
+
+ if err := RawAttachProgram(args); err != nil {
+ new.Close()
+ return fmt.Errorf("can't update cgroup: %s", err)
+ }
+
+ cg.current.Close()
+ cg.current = new
+ return nil
+}
+
+func (cg *progAttachCgroup) Pin(string) error {
+ return fmt.Errorf("can't pin cgroup: %w", ErrNotSupported)
+}
+
+func (cg *progAttachCgroup) Unpin() error {
+ return fmt.Errorf("can't unpin cgroup: %w", ErrNotSupported)
+}
+
+func (cg *progAttachCgroup) Info() (*Info, error) {
+ return nil, fmt.Errorf("can't get cgroup info: %w", ErrNotSupported)
+}
+
+type linkCgroup struct {
+ RawLink
+}
+
+var _ Link = (*linkCgroup)(nil)
+
+// newLinkCgroup attaches prog to cgroup using BPF_LINK_CREATE.
+func newLinkCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Program) (*linkCgroup, error) {
+ link, err := AttachRawLink(RawLinkOptions{
+ Target: int(cgroup.Fd()),
+ Program: prog,
+ Attach: attach,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return &linkCgroup{*link}, err
+}
diff --git a/vendor/github.com/cilium/ebpf/link/doc.go b/vendor/github.com/cilium/ebpf/link/doc.go
new file mode 100644
index 000000000..2bde35ed7
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/doc.go
@@ -0,0 +1,2 @@
+// Package link allows attaching eBPF programs to various kernel hooks.
+package link
diff --git a/vendor/github.com/cilium/ebpf/link/iter.go b/vendor/github.com/cilium/ebpf/link/iter.go
new file mode 100644
index 000000000..0a39faef8
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/iter.go
@@ -0,0 +1,84 @@
+package link
+
+import (
+ "fmt"
+ "io"
+ "unsafe"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/internal/sys"
+)
+
+type IterOptions struct {
+ // Program must be of type Tracing with attach type
+ // AttachTraceIter. The kind of iterator to attach to is
+ // determined at load time via the AttachTo field.
+ //
+ // AttachTo requires the kernel to include BTF of itself,
+ // and it to be compiled with a recent pahole (>= 1.16).
+ Program *ebpf.Program
+
+ // Map specifies the target map for bpf_map_elem and sockmap iterators.
+ // It may be nil.
+ Map *ebpf.Map
+}
+
+// AttachIter attaches a BPF seq_file iterator.
+func AttachIter(opts IterOptions) (*Iter, error) {
+ progFd := opts.Program.FD()
+ if progFd < 0 {
+ return nil, fmt.Errorf("invalid program: %s", sys.ErrClosedFd)
+ }
+
+ var info bpfIterLinkInfoMap
+ if opts.Map != nil {
+ mapFd := opts.Map.FD()
+ if mapFd < 0 {
+ return nil, fmt.Errorf("invalid map: %w", sys.ErrClosedFd)
+ }
+ info.map_fd = uint32(mapFd)
+ }
+
+ attr := sys.LinkCreateIterAttr{
+ ProgFd: uint32(progFd),
+ AttachType: sys.AttachType(ebpf.AttachTraceIter),
+ IterInfo: sys.NewPointer(unsafe.Pointer(&info)),
+ IterInfoLen: uint32(unsafe.Sizeof(info)),
+ }
+
+ fd, err := sys.LinkCreateIter(&attr)
+ if err != nil {
+ if haveFeatErr := haveBPFLink(); haveFeatErr != nil {
+ return nil, haveFeatErr
+ }
+ return nil, fmt.Errorf("can't link iterator: %w", err)
+ }
+
+ return &Iter{RawLink{fd, ""}}, err
+}
+
+// Iter represents an attached bpf_iter.
+type Iter struct {
+ RawLink
+}
+
+// Open creates a new instance of the iterator.
+//
+// Reading from the returned reader triggers the BPF program.
+func (it *Iter) Open() (io.ReadCloser, error) {
+ attr := &sys.IterCreateAttr{
+ LinkFd: it.fd.Uint(),
+ }
+
+ fd, err := sys.IterCreate(attr)
+ if err != nil {
+ return nil, fmt.Errorf("can't create iterator: %w", err)
+ }
+
+ return fd.File("bpf_iter"), nil
+}
+
+// union bpf_iter_link_info.map
+type bpfIterLinkInfoMap struct {
+ map_fd uint32
+}
diff --git a/vendor/github.com/cilium/ebpf/link/kprobe.go b/vendor/github.com/cilium/ebpf/link/kprobe.go
new file mode 100644
index 000000000..b54ca9085
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/kprobe.go
@@ -0,0 +1,357 @@
+package link
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "runtime"
+ "strings"
+ "unsafe"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/tracefs"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// KprobeOptions defines additional parameters that will be used
+// when loading Kprobes.
+type KprobeOptions struct {
+ // Arbitrary value that can be fetched from an eBPF program
+ // via `bpf_get_attach_cookie()`.
+ //
+ // Needs kernel 5.15+.
+ Cookie uint64
+ // Offset of the kprobe relative to the traced symbol.
+ // Can be used to insert kprobes at arbitrary offsets in kernel functions,
+ // e.g. in places where functions have been inlined.
+ Offset uint64
+ // Increase the maximum number of concurrent invocations of a kretprobe.
+ // Required when tracing some long running functions in the kernel.
+ //
+ // Deprecated: this setting forces the use of an outdated kernel API and is not portable
+ // across kernel versions.
+ RetprobeMaxActive int
+ // Prefix used for the event name if the kprobe must be attached using tracefs.
+ // The group name will be formatted as `_`.
+ // The default empty string is equivalent to "ebpf" as the prefix.
+ TraceFSPrefix string
+}
+
+func (ko *KprobeOptions) cookie() uint64 {
+ if ko == nil {
+ return 0
+ }
+ return ko.Cookie
+}
+
+// Kprobe attaches the given eBPF program to a perf event that fires when the
+// given kernel symbol starts executing. See /proc/kallsyms for available
+// symbols. For example, printk():
+//
+// kp, err := Kprobe("printk", prog, nil)
+//
+// Losing the reference to the resulting Link (kp) will close the Kprobe
+// and prevent further execution of prog. The Link must be Closed during
+// program shutdown to avoid leaking system resources.
+//
+// If attaching to symbol fails, automatically retries with the running
+// platform's syscall prefix (e.g. __x64_) to support attaching to syscalls
+// in a portable fashion.
+func Kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, error) {
+ k, err := kprobe(symbol, prog, opts, false)
+ if err != nil {
+ return nil, err
+ }
+
+ lnk, err := attachPerfEvent(k, prog, opts.cookie())
+ if err != nil {
+ k.Close()
+ return nil, err
+ }
+
+ return lnk, nil
+}
+
+// Kretprobe attaches the given eBPF program to a perf event that fires right
+// before the given kernel symbol exits, with the function stack left intact.
+// See /proc/kallsyms for available symbols. For example, printk():
+//
+// kp, err := Kretprobe("printk", prog, nil)
+//
+// Losing the reference to the resulting Link (kp) will close the Kretprobe
+// and prevent further execution of prog. The Link must be Closed during
+// program shutdown to avoid leaking system resources.
+//
+// If attaching to symbol fails, automatically retries with the running
+// platform's syscall prefix (e.g. __x64_) to support attaching to syscalls
+// in a portable fashion.
+//
+// On kernels 5.10 and earlier, setting a kretprobe on a nonexistent symbol
+// incorrectly returns unix.EINVAL instead of os.ErrNotExist.
+func Kretprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, error) {
+ k, err := kprobe(symbol, prog, opts, true)
+ if err != nil {
+ return nil, err
+ }
+
+ lnk, err := attachPerfEvent(k, prog, opts.cookie())
+ if err != nil {
+ k.Close()
+ return nil, err
+ }
+
+ return lnk, nil
+}
+
+// isValidKprobeSymbol implements the equivalent of a regex match
+// against "^[a-zA-Z_][0-9a-zA-Z_.]*$".
+func isValidKprobeSymbol(s string) bool {
+ if len(s) < 1 {
+ return false
+ }
+
+ for i, c := range []byte(s) {
+ switch {
+ case c >= 'a' && c <= 'z':
+ case c >= 'A' && c <= 'Z':
+ case c == '_':
+ case i > 0 && c >= '0' && c <= '9':
+
+ // Allow `.` in symbol name. GCC-compiled kernel may change symbol name
+ // to have a `.isra.$n` suffix, like `udp_send_skb.isra.52`.
+ // See: https://gcc.gnu.org/gcc-10/changes.html
+ case i > 0 && c == '.':
+
+ default:
+ return false
+ }
+ }
+
+ return true
+}
+
+// kprobe opens a perf event on the given symbol and attaches prog to it.
+// If ret is true, create a kretprobe.
+func kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions, ret bool) (*perfEvent, error) {
+ if symbol == "" {
+ return nil, fmt.Errorf("symbol name cannot be empty: %w", errInvalidInput)
+ }
+ if prog == nil {
+ return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput)
+ }
+ if !isValidKprobeSymbol(symbol) {
+ return nil, fmt.Errorf("symbol '%s' must be a valid symbol in /proc/kallsyms: %w", symbol, errInvalidInput)
+ }
+ if prog.Type() != ebpf.Kprobe {
+ return nil, fmt.Errorf("eBPF program type %s is not a Kprobe: %w", prog.Type(), errInvalidInput)
+ }
+
+ args := tracefs.ProbeArgs{
+ Type: tracefs.Kprobe,
+ Pid: perfAllThreads,
+ Symbol: symbol,
+ Ret: ret,
+ }
+
+ if opts != nil {
+ args.RetprobeMaxActive = opts.RetprobeMaxActive
+ args.Cookie = opts.Cookie
+ args.Offset = opts.Offset
+ args.Group = opts.TraceFSPrefix
+ }
+
+ // Use kprobe PMU if the kernel has it available.
+ tp, err := pmuProbe(args)
+ if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) {
+ if prefix := internal.PlatformPrefix(); prefix != "" {
+ args.Symbol = prefix + symbol
+ tp, err = pmuProbe(args)
+ }
+ }
+ if err == nil {
+ return tp, nil
+ }
+ if err != nil && !errors.Is(err, ErrNotSupported) {
+ return nil, fmt.Errorf("creating perf_kprobe PMU (arch-specific fallback for %q): %w", symbol, err)
+ }
+
+ // Use tracefs if kprobe PMU is missing.
+ args.Symbol = symbol
+ tp, err = tracefsProbe(args)
+ if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) {
+ if prefix := internal.PlatformPrefix(); prefix != "" {
+ args.Symbol = prefix + symbol
+ tp, err = tracefsProbe(args)
+ }
+ }
+ if err != nil {
+ return nil, fmt.Errorf("creating tracefs event (arch-specific fallback for %q): %w", symbol, err)
+ }
+
+ return tp, nil
+}
+
+// pmuProbe opens a perf event based on a Performance Monitoring Unit.
+//
+// Requires at least a 4.17 kernel.
+// e12f03d7031a "perf/core: Implement the 'perf_kprobe' PMU"
+// 33ea4b24277b "perf/core: Implement the 'perf_uprobe' PMU"
+//
+// Returns ErrNotSupported if the kernel doesn't support perf_[k,u]probe PMU
+func pmuProbe(args tracefs.ProbeArgs) (*perfEvent, error) {
+ // Getting the PMU type will fail if the kernel doesn't support
+ // the perf_[k,u]probe PMU.
+ eventType, err := internal.ReadUint64FromFileOnce("%d\n", "/sys/bus/event_source/devices", args.Type.String(), "type")
+ if errors.Is(err, os.ErrNotExist) {
+ return nil, fmt.Errorf("%s: %w", args.Type, ErrNotSupported)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ // Use tracefs if we want to set kretprobe's retprobeMaxActive.
+ if args.RetprobeMaxActive != 0 {
+ return nil, fmt.Errorf("pmu probe: non-zero retprobeMaxActive: %w", ErrNotSupported)
+ }
+
+ var config uint64
+ if args.Ret {
+ bit, err := internal.ReadUint64FromFileOnce("config:%d\n", "/sys/bus/event_source/devices", args.Type.String(), "/format/retprobe")
+ if err != nil {
+ return nil, err
+ }
+ config |= 1 << bit
+ }
+
+ var (
+ attr unix.PerfEventAttr
+ sp unsafe.Pointer
+ token string
+ )
+ switch args.Type {
+ case tracefs.Kprobe:
+ // Create a pointer to a NUL-terminated string for the kernel.
+ sp, err = unsafeStringPtr(args.Symbol)
+ if err != nil {
+ return nil, err
+ }
+
+ token = tracefs.KprobeToken(args)
+
+ attr = unix.PerfEventAttr{
+ // The minimum size required for PMU kprobes is PERF_ATTR_SIZE_VER1,
+ // since it added the config2 (Ext2) field. Use Ext2 as probe_offset.
+ Size: unix.PERF_ATTR_SIZE_VER1,
+ Type: uint32(eventType), // PMU event type read from sysfs
+ Ext1: uint64(uintptr(sp)), // Kernel symbol to trace
+ Ext2: args.Offset, // Kernel symbol offset
+ Config: config, // Retprobe flag
+ }
+ case tracefs.Uprobe:
+ sp, err = unsafeStringPtr(args.Path)
+ if err != nil {
+ return nil, err
+ }
+
+ if args.RefCtrOffset != 0 {
+ config |= args.RefCtrOffset << uprobeRefCtrOffsetShift
+ }
+
+ token = tracefs.UprobeToken(args)
+
+ attr = unix.PerfEventAttr{
+ // The minimum size required for PMU uprobes is PERF_ATTR_SIZE_VER1,
+ // since it added the config2 (Ext2) field. The Size field controls the
+ // size of the internal buffer the kernel allocates for reading the
+ // perf_event_attr argument from userspace.
+ Size: unix.PERF_ATTR_SIZE_VER1,
+ Type: uint32(eventType), // PMU event type read from sysfs
+ Ext1: uint64(uintptr(sp)), // Uprobe path
+ Ext2: args.Offset, // Uprobe offset
+ Config: config, // RefCtrOffset, Retprobe flag
+ }
+ }
+
+ rawFd, err := unix.PerfEventOpen(&attr, args.Pid, 0, -1, unix.PERF_FLAG_FD_CLOEXEC)
+
+ // On some old kernels, kprobe PMU doesn't allow `.` in symbol names and
+ // return -EINVAL. Return ErrNotSupported to allow falling back to tracefs.
+ // https://github.com/torvalds/linux/blob/94710cac0ef4/kernel/trace/trace_kprobe.c#L340-L343
+ if errors.Is(err, unix.EINVAL) && strings.Contains(args.Symbol, ".") {
+ return nil, fmt.Errorf("token %s: older kernels don't accept dots: %w", token, ErrNotSupported)
+ }
+ // Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL
+ // when trying to create a retprobe for a missing symbol.
+ if errors.Is(err, os.ErrNotExist) {
+ return nil, fmt.Errorf("token %s: not found: %w", token, err)
+ }
+ // Since commit ab105a4fb894, EILSEQ is returned when a kprobe sym+offset is resolved
+ // to an invalid insn boundary. The exact conditions that trigger this error are
+ // arch specific however.
+ if errors.Is(err, unix.EILSEQ) {
+ return nil, fmt.Errorf("token %s: bad insn boundary: %w", token, os.ErrNotExist)
+ }
+ // Since at least commit cb9a19fe4aa51, ENOTSUPP is returned
+ // when attempting to set a uprobe on a trap instruction.
+ if errors.Is(err, sys.ENOTSUPP) {
+ return nil, fmt.Errorf("token %s: failed setting uprobe on offset %#x (possible trap insn): %w", token, args.Offset, err)
+ }
+
+ if err != nil {
+ return nil, fmt.Errorf("token %s: opening perf event: %w", token, err)
+ }
+
+ // Ensure the string pointer is not collected before PerfEventOpen returns.
+ runtime.KeepAlive(sp)
+
+ fd, err := sys.NewFD(rawFd)
+ if err != nil {
+ return nil, err
+ }
+
+ // Kernel has perf_[k,u]probe PMU available, initialize perf event.
+ return newPerfEvent(fd, nil), nil
+}
+
+// tracefsProbe creates a trace event by writing an entry to /[k,u]probe_events.
+// A new trace event group name is generated on every call to support creating
+// multiple trace events for the same kernel or userspace symbol.
+// Path and offset are only set in the case of uprobe(s) and are used to set
+// the executable/library path on the filesystem and the offset where the probe is inserted.
+// A perf event is then opened on the newly-created trace event and returned to the caller.
+func tracefsProbe(args tracefs.ProbeArgs) (*perfEvent, error) {
+ groupPrefix := "ebpf"
+ if args.Group != "" {
+ groupPrefix = args.Group
+ }
+
+ // Generate a random string for each trace event we attempt to create.
+ // This value is used as the 'group' token in tracefs to allow creating
+ // multiple kprobe trace events with the same name.
+ group, err := tracefs.RandomGroup(groupPrefix)
+ if err != nil {
+ return nil, fmt.Errorf("randomizing group name: %w", err)
+ }
+ args.Group = group
+
+ // Create the [k,u]probe trace event using tracefs.
+ evt, err := tracefs.NewEvent(args)
+ if err != nil {
+ return nil, fmt.Errorf("creating probe entry on tracefs: %w", err)
+ }
+
+ // Kprobes are ephemeral tracepoints and share the same perf event type.
+ fd, err := openTracepointPerfEvent(evt.ID(), args.Pid)
+ if err != nil {
+ // Make sure we clean up the created tracefs event when we return error.
+ // If a livepatch handler is already active on the symbol, the write to
+ // tracefs will succeed, a trace event will show up, but creating the
+ // perf event will fail with EBUSY.
+ _ = evt.Close()
+ return nil, err
+ }
+
+ return newPerfEvent(fd, evt), nil
+}
diff --git a/vendor/github.com/cilium/ebpf/link/kprobe_multi.go b/vendor/github.com/cilium/ebpf/link/kprobe_multi.go
new file mode 100644
index 000000000..4d364d80e
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/kprobe_multi.go
@@ -0,0 +1,180 @@
+package link
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "unsafe"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// KprobeMultiOptions defines additional parameters that will be used
+// when opening a KprobeMulti Link.
+type KprobeMultiOptions struct {
+ // Symbols takes a list of kernel symbol names to attach an ebpf program to.
+ //
+ // Mutually exclusive with Addresses.
+ Symbols []string
+
+ // Addresses takes a list of kernel symbol addresses in case they can not
+ // be referred to by name.
+ //
+ // Note that only start addresses can be specified, since the fprobe API
+ // limits the attach point to the function entry or return.
+ //
+ // Mutually exclusive with Symbols.
+ Addresses []uintptr
+
+ // Cookies specifies arbitrary values that can be fetched from an eBPF
+ // program via `bpf_get_attach_cookie()`.
+ //
+ // If set, its length should be equal to the length of Symbols or Addresses.
+ // Each Cookie is assigned to the Symbol or Address specified at the
+ // corresponding slice index.
+ Cookies []uint64
+}
+
+// KprobeMulti attaches the given eBPF program to the entry point of a given set
+// of kernel symbols.
+//
+// The difference with Kprobe() is that multi-kprobe accomplishes this in a
+// single system call, making it significantly faster than attaching many
+// probes one at a time.
+//
+// Requires at least Linux 5.18.
+func KprobeMulti(prog *ebpf.Program, opts KprobeMultiOptions) (Link, error) {
+ return kprobeMulti(prog, opts, 0)
+}
+
+// KretprobeMulti attaches the given eBPF program to the return point of a given
+// set of kernel symbols.
+//
+// The difference with Kretprobe() is that multi-kprobe accomplishes this in a
+// single system call, making it significantly faster than attaching many
+// probes one at a time.
+//
+// Requires at least Linux 5.18.
+func KretprobeMulti(prog *ebpf.Program, opts KprobeMultiOptions) (Link, error) {
+ return kprobeMulti(prog, opts, unix.BPF_F_KPROBE_MULTI_RETURN)
+}
+
+func kprobeMulti(prog *ebpf.Program, opts KprobeMultiOptions, flags uint32) (Link, error) {
+ if prog == nil {
+ return nil, errors.New("cannot attach a nil program")
+ }
+
+ syms := uint32(len(opts.Symbols))
+ addrs := uint32(len(opts.Addresses))
+ cookies := uint32(len(opts.Cookies))
+
+ if syms == 0 && addrs == 0 {
+ return nil, fmt.Errorf("one of Symbols or Addresses is required: %w", errInvalidInput)
+ }
+ if syms != 0 && addrs != 0 {
+ return nil, fmt.Errorf("Symbols and Addresses are mutually exclusive: %w", errInvalidInput)
+ }
+ if cookies > 0 && cookies != syms && cookies != addrs {
+ return nil, fmt.Errorf("Cookies must be exactly Symbols or Addresses in length: %w", errInvalidInput)
+ }
+
+ attr := &sys.LinkCreateKprobeMultiAttr{
+ ProgFd: uint32(prog.FD()),
+ AttachType: sys.BPF_TRACE_KPROBE_MULTI,
+ KprobeMultiFlags: flags,
+ }
+
+ switch {
+ case syms != 0:
+ attr.Count = syms
+ attr.Syms = sys.NewStringSlicePointer(opts.Symbols)
+
+ case addrs != 0:
+ attr.Count = addrs
+ attr.Addrs = sys.NewPointer(unsafe.Pointer(&opts.Addresses[0]))
+ }
+
+ if cookies != 0 {
+ attr.Cookies = sys.NewPointer(unsafe.Pointer(&opts.Cookies[0]))
+ }
+
+ fd, err := sys.LinkCreateKprobeMulti(attr)
+ if errors.Is(err, unix.ESRCH) {
+ return nil, fmt.Errorf("couldn't find one or more symbols: %w", os.ErrNotExist)
+ }
+ if errors.Is(err, unix.EINVAL) {
+ return nil, fmt.Errorf("%w (missing kernel symbol or prog's AttachType not AttachTraceKprobeMulti?)", err)
+ }
+
+ if err != nil {
+ if haveFeatErr := haveBPFLinkKprobeMulti(); haveFeatErr != nil {
+ return nil, haveFeatErr
+ }
+ return nil, err
+ }
+
+ return &kprobeMultiLink{RawLink{fd, ""}}, nil
+}
+
+type kprobeMultiLink struct {
+ RawLink
+}
+
+var _ Link = (*kprobeMultiLink)(nil)
+
+func (kml *kprobeMultiLink) Update(prog *ebpf.Program) error {
+ return fmt.Errorf("update kprobe_multi: %w", ErrNotSupported)
+}
+
+func (kml *kprobeMultiLink) Pin(string) error {
+ return fmt.Errorf("pin kprobe_multi: %w", ErrNotSupported)
+}
+
+func (kml *kprobeMultiLink) Unpin() error {
+ return fmt.Errorf("unpin kprobe_multi: %w", ErrNotSupported)
+}
+
+var haveBPFLinkKprobeMulti = internal.NewFeatureTest("bpf_link_kprobe_multi", "5.18", func() error {
+ prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
+ Name: "probe_kpm_link",
+ Type: ebpf.Kprobe,
+ Instructions: asm.Instructions{
+ asm.Mov.Imm(asm.R0, 0),
+ asm.Return(),
+ },
+ AttachType: ebpf.AttachTraceKprobeMulti,
+ License: "MIT",
+ })
+ if errors.Is(err, unix.E2BIG) {
+ // Kernel doesn't support AttachType field.
+ return internal.ErrNotSupported
+ }
+ if err != nil {
+ return err
+ }
+ defer prog.Close()
+
+ fd, err := sys.LinkCreateKprobeMulti(&sys.LinkCreateKprobeMultiAttr{
+ ProgFd: uint32(prog.FD()),
+ AttachType: sys.BPF_TRACE_KPROBE_MULTI,
+ Count: 1,
+ Syms: sys.NewStringSlicePointer([]string{"vprintk"}),
+ })
+ switch {
+ case errors.Is(err, unix.EINVAL):
+ return internal.ErrNotSupported
+ // If CONFIG_FPROBE isn't set.
+ case errors.Is(err, unix.EOPNOTSUPP):
+ return internal.ErrNotSupported
+ case err != nil:
+ return err
+ }
+
+ fd.Close()
+
+ return nil
+})
diff --git a/vendor/github.com/cilium/ebpf/link/link.go b/vendor/github.com/cilium/ebpf/link/link.go
new file mode 100644
index 000000000..36acd6ee4
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/link.go
@@ -0,0 +1,336 @@
+package link
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/btf"
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+)
+
+var ErrNotSupported = internal.ErrNotSupported
+
+// Link represents a Program attached to a BPF hook.
+type Link interface {
+ // Replace the current program with a new program.
+ //
+ // Passing a nil program is an error. May return an error wrapping ErrNotSupported.
+ Update(*ebpf.Program) error
+
+ // Persist a link by pinning it into a bpffs.
+ //
+ // May return an error wrapping ErrNotSupported.
+ Pin(string) error
+
+ // Undo a previous call to Pin.
+ //
+ // May return an error wrapping ErrNotSupported.
+ Unpin() error
+
+ // Close frees resources.
+ //
+ // The link will be broken unless it has been successfully pinned.
+ // A link may continue past the lifetime of the process if Close is
+ // not called.
+ Close() error
+
+ // Info returns metadata on a link.
+ //
+ // May return an error wrapping ErrNotSupported.
+ Info() (*Info, error)
+
+ // Prevent external users from implementing this interface.
+ isLink()
+}
+
+// NewLinkFromFD creates a link from a raw fd.
+//
+// You should not use fd after calling this function.
+func NewLinkFromFD(fd int) (Link, error) {
+ sysFD, err := sys.NewFD(fd)
+ if err != nil {
+ return nil, err
+ }
+
+ return wrapRawLink(&RawLink{fd: sysFD})
+}
+
+// LoadPinnedLink loads a link that was persisted into a bpffs.
+func LoadPinnedLink(fileName string, opts *ebpf.LoadPinOptions) (Link, error) {
+ raw, err := loadPinnedRawLink(fileName, opts)
+ if err != nil {
+ return nil, err
+ }
+
+ return wrapRawLink(raw)
+}
+
+// wrap a RawLink in a more specific type if possible.
+//
+// The function takes ownership of raw and closes it on error.
+func wrapRawLink(raw *RawLink) (_ Link, err error) {
+ defer func() {
+ if err != nil {
+ raw.Close()
+ }
+ }()
+
+ info, err := raw.Info()
+ if err != nil {
+ return nil, err
+ }
+
+ switch info.Type {
+ case RawTracepointType:
+ return &rawTracepoint{*raw}, nil
+ case TracingType:
+ return &tracing{*raw}, nil
+ case CgroupType:
+ return &linkCgroup{*raw}, nil
+ case IterType:
+ return &Iter{*raw}, nil
+ case NetNsType:
+ return &NetNsLink{*raw}, nil
+ case KprobeMultiType:
+ return &kprobeMultiLink{*raw}, nil
+ case PerfEventType:
+ return nil, fmt.Errorf("recovering perf event fd: %w", ErrNotSupported)
+ default:
+ return raw, nil
+ }
+}
+
+// ID uniquely identifies a BPF link.
+type ID = sys.LinkID
+
+// RawLinkOptions control the creation of a raw link.
+type RawLinkOptions struct {
+ // File descriptor to attach to. This differs for each attach type.
+ Target int
+ // Program to attach.
+ Program *ebpf.Program
+ // Attach must match the attach type of Program.
+ Attach ebpf.AttachType
+ // BTF is the BTF of the attachment target.
+ BTF btf.TypeID
+ // Flags control the attach behaviour.
+ Flags uint32
+}
+
+// Info contains metadata on a link.
+type Info struct {
+ Type Type
+ ID ID
+ Program ebpf.ProgramID
+ extra interface{}
+}
+
+type TracingInfo sys.TracingLinkInfo
+type CgroupInfo sys.CgroupLinkInfo
+type NetNsInfo sys.NetNsLinkInfo
+type XDPInfo sys.XDPLinkInfo
+
+// Tracing returns tracing type-specific link info.
+//
+// Returns nil if the type-specific link info isn't available.
+func (r Info) Tracing() *TracingInfo {
+ e, _ := r.extra.(*TracingInfo)
+ return e
+}
+
+// Cgroup returns cgroup type-specific link info.
+//
+// Returns nil if the type-specific link info isn't available.
+func (r Info) Cgroup() *CgroupInfo {
+ e, _ := r.extra.(*CgroupInfo)
+ return e
+}
+
+// NetNs returns netns type-specific link info.
+//
+// Returns nil if the type-specific link info isn't available.
+func (r Info) NetNs() *NetNsInfo {
+ e, _ := r.extra.(*NetNsInfo)
+ return e
+}
+
+// ExtraNetNs returns XDP type-specific link info.
+//
+// Returns nil if the type-specific link info isn't available.
+func (r Info) XDP() *XDPInfo {
+ e, _ := r.extra.(*XDPInfo)
+ return e
+}
+
+// RawLink is the low-level API to bpf_link.
+//
+// You should consider using the higher level interfaces in this
+// package instead.
+type RawLink struct {
+ fd *sys.FD
+ pinnedPath string
+}
+
+// AttachRawLink creates a raw link.
+func AttachRawLink(opts RawLinkOptions) (*RawLink, error) {
+ if err := haveBPFLink(); err != nil {
+ return nil, err
+ }
+
+ if opts.Target < 0 {
+ return nil, fmt.Errorf("invalid target: %s", sys.ErrClosedFd)
+ }
+
+ progFd := opts.Program.FD()
+ if progFd < 0 {
+ return nil, fmt.Errorf("invalid program: %s", sys.ErrClosedFd)
+ }
+
+ attr := sys.LinkCreateAttr{
+ TargetFd: uint32(opts.Target),
+ ProgFd: uint32(progFd),
+ AttachType: sys.AttachType(opts.Attach),
+ TargetBtfId: opts.BTF,
+ Flags: opts.Flags,
+ }
+ fd, err := sys.LinkCreate(&attr)
+ if err != nil {
+ return nil, fmt.Errorf("create link: %w", err)
+ }
+
+ return &RawLink{fd, ""}, nil
+}
+
+func loadPinnedRawLink(fileName string, opts *ebpf.LoadPinOptions) (*RawLink, error) {
+ fd, err := sys.ObjGet(&sys.ObjGetAttr{
+ Pathname: sys.NewStringPointer(fileName),
+ FileFlags: opts.Marshal(),
+ })
+ if err != nil {
+ return nil, fmt.Errorf("load pinned link: %w", err)
+ }
+
+ return &RawLink{fd, fileName}, nil
+}
+
+func (l *RawLink) isLink() {}
+
+// FD returns the raw file descriptor.
+func (l *RawLink) FD() int {
+ return l.fd.Int()
+}
+
+// Close breaks the link.
+//
+// Use Pin if you want to make the link persistent.
+func (l *RawLink) Close() error {
+ return l.fd.Close()
+}
+
+// Pin persists a link past the lifetime of the process.
+//
+// Calling Close on a pinned Link will not break the link
+// until the pin is removed.
+func (l *RawLink) Pin(fileName string) error {
+ if err := internal.Pin(l.pinnedPath, fileName, l.fd); err != nil {
+ return err
+ }
+ l.pinnedPath = fileName
+ return nil
+}
+
+// Unpin implements the Link interface.
+func (l *RawLink) Unpin() error {
+ if err := internal.Unpin(l.pinnedPath); err != nil {
+ return err
+ }
+ l.pinnedPath = ""
+ return nil
+}
+
+// IsPinned returns true if the Link has a non-empty pinned path.
+func (l *RawLink) IsPinned() bool {
+ return l.pinnedPath != ""
+}
+
+// Update implements the Link interface.
+func (l *RawLink) Update(new *ebpf.Program) error {
+ return l.UpdateArgs(RawLinkUpdateOptions{
+ New: new,
+ })
+}
+
+// RawLinkUpdateOptions control the behaviour of RawLink.UpdateArgs.
+type RawLinkUpdateOptions struct {
+ New *ebpf.Program
+ Old *ebpf.Program
+ Flags uint32
+}
+
+// UpdateArgs updates a link based on args.
+func (l *RawLink) UpdateArgs(opts RawLinkUpdateOptions) error {
+ newFd := opts.New.FD()
+ if newFd < 0 {
+ return fmt.Errorf("invalid program: %s", sys.ErrClosedFd)
+ }
+
+ var oldFd int
+ if opts.Old != nil {
+ oldFd = opts.Old.FD()
+ if oldFd < 0 {
+ return fmt.Errorf("invalid replacement program: %s", sys.ErrClosedFd)
+ }
+ }
+
+ attr := sys.LinkUpdateAttr{
+ LinkFd: l.fd.Uint(),
+ NewProgFd: uint32(newFd),
+ OldProgFd: uint32(oldFd),
+ Flags: opts.Flags,
+ }
+ return sys.LinkUpdate(&attr)
+}
+
+// Info returns metadata about the link.
+func (l *RawLink) Info() (*Info, error) {
+ var info sys.LinkInfo
+
+ if err := sys.ObjInfo(l.fd, &info); err != nil {
+ return nil, fmt.Errorf("link info: %s", err)
+ }
+
+ var extra interface{}
+ switch info.Type {
+ case CgroupType:
+ extra = &CgroupInfo{}
+ case NetNsType:
+ extra = &NetNsInfo{}
+ case TracingType:
+ extra = &TracingInfo{}
+ case XDPType:
+ extra = &XDPInfo{}
+ case RawTracepointType, IterType,
+ PerfEventType, KprobeMultiType:
+ // Extra metadata not supported.
+ default:
+ return nil, fmt.Errorf("unknown link info type: %d", info.Type)
+ }
+
+ if extra != nil {
+ buf := bytes.NewReader(info.Extra[:])
+ err := binary.Read(buf, internal.NativeEndian, extra)
+ if err != nil {
+ return nil, fmt.Errorf("cannot read extra link info: %w", err)
+ }
+ }
+
+ return &Info{
+ info.Type,
+ info.Id,
+ ebpf.ProgramID(info.ProgId),
+ extra,
+ }, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/link/netns.go b/vendor/github.com/cilium/ebpf/link/netns.go
new file mode 100644
index 000000000..344ecced6
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/netns.go
@@ -0,0 +1,36 @@
+package link
+
+import (
+ "fmt"
+
+ "github.com/cilium/ebpf"
+)
+
+// NetNsLink is a program attached to a network namespace.
+type NetNsLink struct {
+ RawLink
+}
+
+// AttachNetNs attaches a program to a network namespace.
+func AttachNetNs(ns int, prog *ebpf.Program) (*NetNsLink, error) {
+ var attach ebpf.AttachType
+ switch t := prog.Type(); t {
+ case ebpf.FlowDissector:
+ attach = ebpf.AttachFlowDissector
+ case ebpf.SkLookup:
+ attach = ebpf.AttachSkLookup
+ default:
+ return nil, fmt.Errorf("can't attach %v to network namespace", t)
+ }
+
+ link, err := AttachRawLink(RawLinkOptions{
+ Target: ns,
+ Program: prog,
+ Attach: attach,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return &NetNsLink{*link}, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/link/perf_event.go b/vendor/github.com/cilium/ebpf/link/perf_event.go
new file mode 100644
index 000000000..5f7a628b3
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/perf_event.go
@@ -0,0 +1,270 @@
+package link
+
+import (
+ "errors"
+ "fmt"
+ "runtime"
+ "unsafe"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/tracefs"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// Getting the terminology right is usually the hardest part. For posterity and
+// for staying sane during implementation:
+//
+// - trace event: Representation of a kernel runtime hook. Filesystem entries
+// under /events. Can be tracepoints (static), kprobes or uprobes.
+// Can be instantiated into perf events (see below).
+// - tracepoint: A predetermined hook point in the kernel. Exposed as trace
+// events in (sub)directories under /events. Cannot be closed or
+// removed, they are static.
+// - k(ret)probe: Ephemeral trace events based on entry or exit points of
+// exported kernel symbols. kprobe-based (tracefs) trace events can be
+// created system-wide by writing to the /kprobe_events file, or
+// they can be scoped to the current process by creating PMU perf events.
+// - u(ret)probe: Ephemeral trace events based on user provides ELF binaries
+// and offsets. uprobe-based (tracefs) trace events can be
+// created system-wide by writing to the /uprobe_events file, or
+// they can be scoped to the current process by creating PMU perf events.
+// - perf event: An object instantiated based on an existing trace event or
+// kernel symbol. Referred to by fd in userspace.
+// Exactly one eBPF program can be attached to a perf event. Multiple perf
+// events can be created from a single trace event. Closing a perf event
+// stops any further invocations of the attached eBPF program.
+
+var (
+ errInvalidInput = tracefs.ErrInvalidInput
+)
+
+const (
+ perfAllThreads = -1
+)
+
+// A perfEvent represents a perf event kernel object. Exactly one eBPF program
+// can be attached to it. It is created based on a tracefs trace event or a
+// Performance Monitoring Unit (PMU).
+type perfEvent struct {
+ // Trace event backing this perfEvent. May be nil.
+ tracefsEvent *tracefs.Event
+
+ // This is the perf event FD.
+ fd *sys.FD
+}
+
+func newPerfEvent(fd *sys.FD, event *tracefs.Event) *perfEvent {
+ pe := &perfEvent{event, fd}
+ // Both event and fd have their own finalizer, but we want to
+ // guarantee that they are closed in a certain order.
+ runtime.SetFinalizer(pe, (*perfEvent).Close)
+ return pe
+}
+
+func (pe *perfEvent) Close() error {
+ runtime.SetFinalizer(pe, nil)
+
+ if err := pe.fd.Close(); err != nil {
+ return fmt.Errorf("closing perf event fd: %w", err)
+ }
+
+ if pe.tracefsEvent != nil {
+ return pe.tracefsEvent.Close()
+ }
+
+ return nil
+}
+
+// perfEventLink represents a bpf perf link.
+type perfEventLink struct {
+ RawLink
+ pe *perfEvent
+}
+
+func (pl *perfEventLink) isLink() {}
+
+// Pinning requires the underlying perf event FD to stay open.
+//
+// | PerfEvent FD | BpfLink FD | Works |
+// |--------------|------------|-------|
+// | Open | Open | Yes |
+// | Closed | Open | No |
+// | Open | Closed | No (Pin() -> EINVAL) |
+// | Closed | Closed | No (Pin() -> EINVAL) |
+//
+// There is currently no pretty way to recover the perf event FD
+// when loading a pinned link, so leave as not supported for now.
+func (pl *perfEventLink) Pin(string) error {
+ return fmt.Errorf("perf event link pin: %w", ErrNotSupported)
+}
+
+func (pl *perfEventLink) Unpin() error {
+ return fmt.Errorf("perf event link unpin: %w", ErrNotSupported)
+}
+
+func (pl *perfEventLink) Close() error {
+ if err := pl.fd.Close(); err != nil {
+ return fmt.Errorf("perf link close: %w", err)
+ }
+
+ if err := pl.pe.Close(); err != nil {
+ return fmt.Errorf("perf event close: %w", err)
+ }
+ return nil
+}
+
+func (pl *perfEventLink) Update(prog *ebpf.Program) error {
+ return fmt.Errorf("perf event link update: %w", ErrNotSupported)
+}
+
+// perfEventIoctl implements Link and handles the perf event lifecycle
+// via ioctl().
+type perfEventIoctl struct {
+ *perfEvent
+}
+
+func (pi *perfEventIoctl) isLink() {}
+
+// Since 4.15 (e87c6bc3852b "bpf: permit multiple bpf attachments for a single perf event"),
+// calling PERF_EVENT_IOC_SET_BPF appends the given program to a prog_array
+// owned by the perf event, which means multiple programs can be attached
+// simultaneously.
+//
+// Before 4.15, calling PERF_EVENT_IOC_SET_BPF more than once on a perf event
+// returns EEXIST.
+//
+// Detaching a program from a perf event is currently not possible, so a
+// program replacement mechanism cannot be implemented for perf events.
+func (pi *perfEventIoctl) Update(prog *ebpf.Program) error {
+ return fmt.Errorf("perf event ioctl update: %w", ErrNotSupported)
+}
+
+func (pi *perfEventIoctl) Pin(string) error {
+ return fmt.Errorf("perf event ioctl pin: %w", ErrNotSupported)
+}
+
+func (pi *perfEventIoctl) Unpin() error {
+ return fmt.Errorf("perf event ioctl unpin: %w", ErrNotSupported)
+}
+
+func (pi *perfEventIoctl) Info() (*Info, error) {
+ return nil, fmt.Errorf("perf event ioctl info: %w", ErrNotSupported)
+}
+
+// attach the given eBPF prog to the perf event stored in pe.
+// pe must contain a valid perf event fd.
+// prog's type must match the program type stored in pe.
+func attachPerfEvent(pe *perfEvent, prog *ebpf.Program, cookie uint64) (Link, error) {
+ if prog == nil {
+ return nil, errors.New("cannot attach a nil program")
+ }
+ if prog.FD() < 0 {
+ return nil, fmt.Errorf("invalid program: %w", sys.ErrClosedFd)
+ }
+
+ if err := haveBPFLinkPerfEvent(); err == nil {
+ return attachPerfEventLink(pe, prog, cookie)
+ }
+
+ if cookie != 0 {
+ return nil, fmt.Errorf("cookies are not supported: %w", ErrNotSupported)
+ }
+
+ return attachPerfEventIoctl(pe, prog)
+}
+
+func attachPerfEventIoctl(pe *perfEvent, prog *ebpf.Program) (*perfEventIoctl, error) {
+ // Assign the eBPF program to the perf event.
+ err := unix.IoctlSetInt(pe.fd.Int(), unix.PERF_EVENT_IOC_SET_BPF, prog.FD())
+ if err != nil {
+ return nil, fmt.Errorf("setting perf event bpf program: %w", err)
+ }
+
+ // PERF_EVENT_IOC_ENABLE and _DISABLE ignore their given values.
+ if err := unix.IoctlSetInt(pe.fd.Int(), unix.PERF_EVENT_IOC_ENABLE, 0); err != nil {
+ return nil, fmt.Errorf("enable perf event: %s", err)
+ }
+
+ return &perfEventIoctl{pe}, nil
+}
+
+// Use the bpf api to attach the perf event (BPF_LINK_TYPE_PERF_EVENT, 5.15+).
+//
+// https://github.com/torvalds/linux/commit/b89fbfbb854c9afc3047e8273cc3a694650b802e
+func attachPerfEventLink(pe *perfEvent, prog *ebpf.Program, cookie uint64) (*perfEventLink, error) {
+ fd, err := sys.LinkCreatePerfEvent(&sys.LinkCreatePerfEventAttr{
+ ProgFd: uint32(prog.FD()),
+ TargetFd: pe.fd.Uint(),
+ AttachType: sys.BPF_PERF_EVENT,
+ BpfCookie: cookie,
+ })
+ if err != nil {
+ return nil, fmt.Errorf("cannot create bpf perf link: %v", err)
+ }
+
+ return &perfEventLink{RawLink{fd: fd}, pe}, nil
+}
+
+// unsafeStringPtr returns an unsafe.Pointer to a NUL-terminated copy of str.
+func unsafeStringPtr(str string) (unsafe.Pointer, error) {
+ p, err := unix.BytePtrFromString(str)
+ if err != nil {
+ return nil, err
+ }
+ return unsafe.Pointer(p), nil
+}
+
+// openTracepointPerfEvent opens a tracepoint-type perf event. System-wide
+// [k,u]probes created by writing to /[k,u]probe_events are tracepoints
+// behind the scenes, and can be attached to using these perf events.
+func openTracepointPerfEvent(tid uint64, pid int) (*sys.FD, error) {
+ attr := unix.PerfEventAttr{
+ Type: unix.PERF_TYPE_TRACEPOINT,
+ Config: tid,
+ Sample_type: unix.PERF_SAMPLE_RAW,
+ Sample: 1,
+ Wakeup: 1,
+ }
+
+ fd, err := unix.PerfEventOpen(&attr, pid, 0, -1, unix.PERF_FLAG_FD_CLOEXEC)
+ if err != nil {
+ return nil, fmt.Errorf("opening tracepoint perf event: %w", err)
+ }
+
+ return sys.NewFD(fd)
+}
+
+// Probe BPF perf link.
+//
+// https://elixir.bootlin.com/linux/v5.16.8/source/kernel/bpf/syscall.c#L4307
+// https://github.com/torvalds/linux/commit/b89fbfbb854c9afc3047e8273cc3a694650b802e
+var haveBPFLinkPerfEvent = internal.NewFeatureTest("bpf_link_perf_event", "5.15", func() error {
+ prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
+ Name: "probe_bpf_perf_link",
+ Type: ebpf.Kprobe,
+ Instructions: asm.Instructions{
+ asm.Mov.Imm(asm.R0, 0),
+ asm.Return(),
+ },
+ License: "MIT",
+ })
+ if err != nil {
+ return err
+ }
+ defer prog.Close()
+
+ _, err = sys.LinkCreatePerfEvent(&sys.LinkCreatePerfEventAttr{
+ ProgFd: uint32(prog.FD()),
+ AttachType: sys.BPF_PERF_EVENT,
+ })
+ if errors.Is(err, unix.EINVAL) {
+ return internal.ErrNotSupported
+ }
+ if errors.Is(err, unix.EBADF) {
+ return nil
+ }
+ return err
+})
diff --git a/vendor/github.com/cilium/ebpf/link/program.go b/vendor/github.com/cilium/ebpf/link/program.go
new file mode 100644
index 000000000..053735a67
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/program.go
@@ -0,0 +1,75 @@
+package link
+
+import (
+ "fmt"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/internal/sys"
+)
+
+type RawAttachProgramOptions struct {
+ // File descriptor to attach to. This differs for each attach type.
+ Target int
+ // Program to attach.
+ Program *ebpf.Program
+ // Program to replace (cgroups).
+ Replace *ebpf.Program
+ // Attach must match the attach type of Program (and Replace).
+ Attach ebpf.AttachType
+ // Flags control the attach behaviour. This differs for each attach type.
+ Flags uint32
+}
+
+// RawAttachProgram is a low level wrapper around BPF_PROG_ATTACH.
+//
+// You should use one of the higher level abstractions available in this
+// package if possible.
+func RawAttachProgram(opts RawAttachProgramOptions) error {
+ var replaceFd uint32
+ if opts.Replace != nil {
+ replaceFd = uint32(opts.Replace.FD())
+ }
+
+ attr := sys.ProgAttachAttr{
+ TargetFd: uint32(opts.Target),
+ AttachBpfFd: uint32(opts.Program.FD()),
+ ReplaceBpfFd: replaceFd,
+ AttachType: uint32(opts.Attach),
+ AttachFlags: uint32(opts.Flags),
+ }
+
+ if err := sys.ProgAttach(&attr); err != nil {
+ if haveFeatErr := haveProgAttach(); haveFeatErr != nil {
+ return haveFeatErr
+ }
+ return fmt.Errorf("can't attach program: %w", err)
+ }
+
+ return nil
+}
+
+type RawDetachProgramOptions struct {
+ Target int
+ Program *ebpf.Program
+ Attach ebpf.AttachType
+}
+
+// RawDetachProgram is a low level wrapper around BPF_PROG_DETACH.
+//
+// You should use one of the higher level abstractions available in this
+// package if possible.
+func RawDetachProgram(opts RawDetachProgramOptions) error {
+ attr := sys.ProgDetachAttr{
+ TargetFd: uint32(opts.Target),
+ AttachBpfFd: uint32(opts.Program.FD()),
+ AttachType: uint32(opts.Attach),
+ }
+ if err := sys.ProgDetach(&attr); err != nil {
+ if haveFeatErr := haveProgAttach(); haveFeatErr != nil {
+ return haveFeatErr
+ }
+ return fmt.Errorf("can't detach program: %w", err)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/ebpf/link/query.go b/vendor/github.com/cilium/ebpf/link/query.go
new file mode 100644
index 000000000..c05656512
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/query.go
@@ -0,0 +1,63 @@
+package link
+
+import (
+ "fmt"
+ "os"
+ "unsafe"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/internal/sys"
+)
+
+// QueryOptions defines additional parameters when querying for programs.
+type QueryOptions struct {
+ // Path can be a path to a cgroup, netns or LIRC2 device
+ Path string
+ // Attach specifies the AttachType of the programs queried for
+ Attach ebpf.AttachType
+ // QueryFlags are flags for BPF_PROG_QUERY, e.g. BPF_F_QUERY_EFFECTIVE
+ QueryFlags uint32
+}
+
+// QueryPrograms retrieves ProgramIDs associated with the AttachType.
+//
+// Returns (nil, nil) if there are no programs attached to the queried kernel
+// resource. Calling QueryPrograms on a kernel missing PROG_QUERY will result in
+// ErrNotSupported.
+func QueryPrograms(opts QueryOptions) ([]ebpf.ProgramID, error) {
+ if haveProgQuery() != nil {
+ return nil, fmt.Errorf("can't query program IDs: %w", ErrNotSupported)
+ }
+
+ f, err := os.Open(opts.Path)
+ if err != nil {
+ return nil, fmt.Errorf("can't open file: %s", err)
+ }
+ defer f.Close()
+
+ // query the number of programs to allocate correct slice size
+ attr := sys.ProgQueryAttr{
+ TargetFd: uint32(f.Fd()),
+ AttachType: sys.AttachType(opts.Attach),
+ QueryFlags: opts.QueryFlags,
+ }
+ if err := sys.ProgQuery(&attr); err != nil {
+ return nil, fmt.Errorf("can't query program count: %w", err)
+ }
+
+ // return nil if no progs are attached
+ if attr.ProgCount == 0 {
+ return nil, nil
+ }
+
+ // we have at least one prog, so we query again
+ progIds := make([]ebpf.ProgramID, attr.ProgCount)
+ attr.ProgIds = sys.NewPointer(unsafe.Pointer(&progIds[0]))
+ attr.ProgCount = uint32(len(progIds))
+ if err := sys.ProgQuery(&attr); err != nil {
+ return nil, fmt.Errorf("can't query program IDs: %w", err)
+ }
+
+ return progIds, nil
+
+}
diff --git a/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go b/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go
new file mode 100644
index 000000000..925e621cb
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go
@@ -0,0 +1,87 @@
+package link
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/internal/sys"
+)
+
+type RawTracepointOptions struct {
+ // Tracepoint name.
+ Name string
+ // Program must be of type RawTracepoint*
+ Program *ebpf.Program
+}
+
+// AttachRawTracepoint links a BPF program to a raw_tracepoint.
+//
+// Requires at least Linux 4.17.
+func AttachRawTracepoint(opts RawTracepointOptions) (Link, error) {
+ if t := opts.Program.Type(); t != ebpf.RawTracepoint && t != ebpf.RawTracepointWritable {
+ return nil, fmt.Errorf("invalid program type %s, expected RawTracepoint(Writable)", t)
+ }
+ if opts.Program.FD() < 0 {
+ return nil, fmt.Errorf("invalid program: %w", sys.ErrClosedFd)
+ }
+
+ fd, err := sys.RawTracepointOpen(&sys.RawTracepointOpenAttr{
+ Name: sys.NewStringPointer(opts.Name),
+ ProgFd: uint32(opts.Program.FD()),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ err = haveBPFLink()
+ if errors.Is(err, ErrNotSupported) {
+ // Prior to commit 70ed506c3bbc ("bpf: Introduce pinnable bpf_link abstraction")
+ // raw_tracepoints are just a plain fd.
+ return &simpleRawTracepoint{fd}, nil
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ return &rawTracepoint{RawLink{fd: fd}}, nil
+}
+
+type simpleRawTracepoint struct {
+ fd *sys.FD
+}
+
+var _ Link = (*simpleRawTracepoint)(nil)
+
+func (frt *simpleRawTracepoint) isLink() {}
+
+func (frt *simpleRawTracepoint) Close() error {
+ return frt.fd.Close()
+}
+
+func (frt *simpleRawTracepoint) Update(_ *ebpf.Program) error {
+ return fmt.Errorf("update raw_tracepoint: %w", ErrNotSupported)
+}
+
+func (frt *simpleRawTracepoint) Pin(string) error {
+ return fmt.Errorf("pin raw_tracepoint: %w", ErrNotSupported)
+}
+
+func (frt *simpleRawTracepoint) Unpin() error {
+ return fmt.Errorf("unpin raw_tracepoint: %w", ErrNotSupported)
+}
+
+func (frt *simpleRawTracepoint) Info() (*Info, error) {
+ return nil, fmt.Errorf("can't get raw_tracepoint info: %w", ErrNotSupported)
+}
+
+type rawTracepoint struct {
+ RawLink
+}
+
+var _ Link = (*rawTracepoint)(nil)
+
+func (rt *rawTracepoint) Update(_ *ebpf.Program) error {
+ return fmt.Errorf("update raw_tracepoint: %w", ErrNotSupported)
+}
diff --git a/vendor/github.com/cilium/ebpf/link/socket_filter.go b/vendor/github.com/cilium/ebpf/link/socket_filter.go
new file mode 100644
index 000000000..84f0b656f
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/socket_filter.go
@@ -0,0 +1,40 @@
+package link
+
+import (
+ "syscall"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// AttachSocketFilter attaches a SocketFilter BPF program to a socket.
+func AttachSocketFilter(conn syscall.Conn, program *ebpf.Program) error {
+ rawConn, err := conn.SyscallConn()
+ if err != nil {
+ return err
+ }
+ var ssoErr error
+ err = rawConn.Control(func(fd uintptr) {
+ ssoErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_ATTACH_BPF, program.FD())
+ })
+ if ssoErr != nil {
+ return ssoErr
+ }
+ return err
+}
+
+// DetachSocketFilter detaches a SocketFilter BPF program from a socket.
+func DetachSocketFilter(conn syscall.Conn) error {
+ rawConn, err := conn.SyscallConn()
+ if err != nil {
+ return err
+ }
+ var ssoErr error
+ err = rawConn.Control(func(fd uintptr) {
+ ssoErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_DETACH_BPF, 0)
+ })
+ if ssoErr != nil {
+ return ssoErr
+ }
+ return err
+}
diff --git a/vendor/github.com/cilium/ebpf/link/syscalls.go b/vendor/github.com/cilium/ebpf/link/syscalls.go
new file mode 100644
index 000000000..012970ec7
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/syscalls.go
@@ -0,0 +1,126 @@
+package link
+
+import (
+ "errors"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// Type is the kind of link.
+type Type = sys.LinkType
+
+// Valid link types.
+const (
+ UnspecifiedType = sys.BPF_LINK_TYPE_UNSPEC
+ RawTracepointType = sys.BPF_LINK_TYPE_RAW_TRACEPOINT
+ TracingType = sys.BPF_LINK_TYPE_TRACING
+ CgroupType = sys.BPF_LINK_TYPE_CGROUP
+ IterType = sys.BPF_LINK_TYPE_ITER
+ NetNsType = sys.BPF_LINK_TYPE_NETNS
+ XDPType = sys.BPF_LINK_TYPE_XDP
+ PerfEventType = sys.BPF_LINK_TYPE_PERF_EVENT
+ KprobeMultiType = sys.BPF_LINK_TYPE_KPROBE_MULTI
+)
+
+var haveProgAttach = internal.NewFeatureTest("BPF_PROG_ATTACH", "4.10", func() error {
+ prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
+ Type: ebpf.CGroupSKB,
+ License: "MIT",
+ Instructions: asm.Instructions{
+ asm.Mov.Imm(asm.R0, 0),
+ asm.Return(),
+ },
+ })
+ if err != nil {
+ return internal.ErrNotSupported
+ }
+
+ // BPF_PROG_ATTACH was introduced at the same time as CGgroupSKB,
+ // so being able to load the program is enough to infer that we
+ // have the syscall.
+ prog.Close()
+ return nil
+})
+
+var haveProgAttachReplace = internal.NewFeatureTest("BPF_PROG_ATTACH atomic replacement of MULTI progs", "5.5", func() error {
+ if err := haveProgAttach(); err != nil {
+ return err
+ }
+
+ prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
+ Type: ebpf.CGroupSKB,
+ AttachType: ebpf.AttachCGroupInetIngress,
+ License: "MIT",
+ Instructions: asm.Instructions{
+ asm.Mov.Imm(asm.R0, 0),
+ asm.Return(),
+ },
+ })
+
+ if err != nil {
+ return internal.ErrNotSupported
+ }
+
+ defer prog.Close()
+
+ // We know that we have BPF_PROG_ATTACH since we can load CGroupSKB programs.
+ // If passing BPF_F_REPLACE gives us EINVAL we know that the feature isn't
+ // present.
+ attr := sys.ProgAttachAttr{
+ // We rely on this being checked after attachFlags.
+ TargetFd: ^uint32(0),
+ AttachBpfFd: uint32(prog.FD()),
+ AttachType: uint32(ebpf.AttachCGroupInetIngress),
+ AttachFlags: uint32(flagReplace),
+ }
+
+ err = sys.ProgAttach(&attr)
+ if errors.Is(err, unix.EINVAL) {
+ return internal.ErrNotSupported
+ }
+ if errors.Is(err, unix.EBADF) {
+ return nil
+ }
+ return err
+})
+
+var haveBPFLink = internal.NewFeatureTest("bpf_link", "5.7", func() error {
+ attr := sys.LinkCreateAttr{
+ // This is a hopefully invalid file descriptor, which triggers EBADF.
+ TargetFd: ^uint32(0),
+ ProgFd: ^uint32(0),
+ AttachType: sys.AttachType(ebpf.AttachCGroupInetIngress),
+ }
+ _, err := sys.LinkCreate(&attr)
+ if errors.Is(err, unix.EINVAL) {
+ return internal.ErrNotSupported
+ }
+ if errors.Is(err, unix.EBADF) {
+ return nil
+ }
+ return err
+})
+
+var haveProgQuery = internal.NewFeatureTest("BPF_PROG_QUERY", "4.15", func() error {
+ attr := sys.ProgQueryAttr{
+ // We rely on this being checked during the syscall.
+ // With an otherwise correct payload we expect EBADF here
+ // as an indication that the feature is present.
+ TargetFd: ^uint32(0),
+ AttachType: sys.AttachType(ebpf.AttachCGroupInetIngress),
+ }
+
+ err := sys.ProgQuery(&attr)
+
+ if errors.Is(err, unix.EBADF) {
+ return nil
+ }
+ if err != nil {
+ return ErrNotSupported
+ }
+ return errors.New("syscall succeeded unexpectedly")
+})
diff --git a/vendor/github.com/cilium/ebpf/link/tracepoint.go b/vendor/github.com/cilium/ebpf/link/tracepoint.go
new file mode 100644
index 000000000..95f5fae3b
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/tracepoint.go
@@ -0,0 +1,68 @@
+package link
+
+import (
+ "fmt"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/internal/tracefs"
+)
+
+// TracepointOptions defines additional parameters that will be used
+// when loading Tracepoints.
+type TracepointOptions struct {
+ // Arbitrary value that can be fetched from an eBPF program
+ // via `bpf_get_attach_cookie()`.
+ //
+ // Needs kernel 5.15+.
+ Cookie uint64
+}
+
+// Tracepoint attaches the given eBPF program to the tracepoint with the given
+// group and name. See /sys/kernel/tracing/events to find available
+// tracepoints. The top-level directory is the group, the event's subdirectory
+// is the name. Example:
+//
+// tp, err := Tracepoint("syscalls", "sys_enter_fork", prog, nil)
+//
+// Losing the reference to the resulting Link (tp) will close the Tracepoint
+// and prevent further execution of prog. The Link must be Closed during
+// program shutdown to avoid leaking system resources.
+//
+// Note that attaching eBPF programs to syscalls (sys_enter_*/sys_exit_*) is
+// only possible as of kernel 4.14 (commit cf5f5ce).
+func Tracepoint(group, name string, prog *ebpf.Program, opts *TracepointOptions) (Link, error) {
+ if group == "" || name == "" {
+ return nil, fmt.Errorf("group and name cannot be empty: %w", errInvalidInput)
+ }
+ if prog == nil {
+ return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput)
+ }
+ if prog.Type() != ebpf.TracePoint {
+ return nil, fmt.Errorf("eBPF program type %s is not a Tracepoint: %w", prog.Type(), errInvalidInput)
+ }
+
+ tid, err := tracefs.EventID(group, name)
+ if err != nil {
+ return nil, err
+ }
+
+ fd, err := openTracepointPerfEvent(tid, perfAllThreads)
+ if err != nil {
+ return nil, err
+ }
+
+ var cookie uint64
+ if opts != nil {
+ cookie = opts.Cookie
+ }
+
+ pe := newPerfEvent(fd, nil)
+
+ lnk, err := attachPerfEvent(pe, prog, cookie)
+ if err != nil {
+ pe.Close()
+ return nil, err
+ }
+
+ return lnk, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/link/tracing.go b/vendor/github.com/cilium/ebpf/link/tracing.go
new file mode 100644
index 000000000..1e1a7834d
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/tracing.go
@@ -0,0 +1,199 @@
+package link
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/btf"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+type tracing struct {
+ RawLink
+}
+
+func (f *tracing) Update(new *ebpf.Program) error {
+ return fmt.Errorf("tracing update: %w", ErrNotSupported)
+}
+
+// AttachFreplace attaches the given eBPF program to the function it replaces.
+//
+// The program and name can either be provided at link time, or can be provided
+// at program load time. If they were provided at load time, they should be nil
+// and empty respectively here, as they will be ignored by the kernel.
+// Examples:
+//
+// AttachFreplace(dispatcher, "function", replacement)
+// AttachFreplace(nil, "", replacement)
+func AttachFreplace(targetProg *ebpf.Program, name string, prog *ebpf.Program) (Link, error) {
+ if (name == "") != (targetProg == nil) {
+ return nil, fmt.Errorf("must provide both or neither of name and targetProg: %w", errInvalidInput)
+ }
+ if prog == nil {
+ return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput)
+ }
+ if prog.Type() != ebpf.Extension {
+ return nil, fmt.Errorf("eBPF program type %s is not an Extension: %w", prog.Type(), errInvalidInput)
+ }
+
+ var (
+ target int
+ typeID btf.TypeID
+ )
+ if targetProg != nil {
+ btfHandle, err := targetProg.Handle()
+ if err != nil {
+ return nil, err
+ }
+ defer btfHandle.Close()
+
+ spec, err := btfHandle.Spec(nil)
+ if err != nil {
+ return nil, err
+ }
+
+ var function *btf.Func
+ if err := spec.TypeByName(name, &function); err != nil {
+ return nil, err
+ }
+
+ target = targetProg.FD()
+ typeID, err = spec.TypeID(function)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ link, err := AttachRawLink(RawLinkOptions{
+ Target: target,
+ Program: prog,
+ Attach: ebpf.AttachNone,
+ BTF: typeID,
+ })
+ if errors.Is(err, sys.ENOTSUPP) {
+ // This may be returned by bpf_tracing_prog_attach via bpf_arch_text_poke.
+ return nil, fmt.Errorf("create raw tracepoint: %w", ErrNotSupported)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ return &tracing{*link}, nil
+}
+
+type TracingOptions struct {
+ // Program must be of type Tracing with attach type
+ // AttachTraceFEntry/AttachTraceFExit/AttachModifyReturn or
+ // AttachTraceRawTp.
+ Program *ebpf.Program
+ // Program attach type. Can be one of:
+ // - AttachTraceFEntry
+ // - AttachTraceFExit
+ // - AttachModifyReturn
+ // - AttachTraceRawTp
+ // This field is optional.
+ AttachType ebpf.AttachType
+ // Arbitrary value that can be fetched from an eBPF program
+ // via `bpf_get_attach_cookie()`.
+ Cookie uint64
+}
+
+type LSMOptions struct {
+ // Program must be of type LSM with attach type
+ // AttachLSMMac.
+ Program *ebpf.Program
+ // Arbitrary value that can be fetched from an eBPF program
+ // via `bpf_get_attach_cookie()`.
+ Cookie uint64
+}
+
+// attachBTFID links BPF programs (Tracing/LSM) that attach to a BTF ID.
+func attachBTFID(program *ebpf.Program, at ebpf.AttachType, cookie uint64) (Link, error) {
+ if program.FD() < 0 {
+ return nil, fmt.Errorf("invalid program %w", sys.ErrClosedFd)
+ }
+
+ var (
+ fd *sys.FD
+ err error
+ )
+ switch at {
+ case ebpf.AttachTraceFEntry, ebpf.AttachTraceFExit, ebpf.AttachTraceRawTp,
+ ebpf.AttachModifyReturn, ebpf.AttachLSMMac:
+ // Attach via BPF link
+ fd, err = sys.LinkCreateTracing(&sys.LinkCreateTracingAttr{
+ ProgFd: uint32(program.FD()),
+ AttachType: sys.AttachType(at),
+ Cookie: cookie,
+ })
+ if err == nil {
+ break
+ }
+ if !errors.Is(err, unix.EINVAL) && !errors.Is(err, sys.ENOTSUPP) {
+ return nil, fmt.Errorf("create tracing link: %w", err)
+ }
+ fallthrough
+ case ebpf.AttachNone:
+ // Attach via RawTracepointOpen
+ if cookie > 0 {
+ return nil, fmt.Errorf("create raw tracepoint with cookie: %w", ErrNotSupported)
+ }
+
+ fd, err = sys.RawTracepointOpen(&sys.RawTracepointOpenAttr{
+ ProgFd: uint32(program.FD()),
+ })
+ if errors.Is(err, sys.ENOTSUPP) {
+ // This may be returned by bpf_tracing_prog_attach via bpf_arch_text_poke.
+ return nil, fmt.Errorf("create raw tracepoint: %w", ErrNotSupported)
+ }
+ if err != nil {
+ return nil, fmt.Errorf("create raw tracepoint: %w", err)
+ }
+ default:
+ return nil, fmt.Errorf("invalid attach type: %s", at.String())
+ }
+
+ raw := RawLink{fd: fd}
+ info, err := raw.Info()
+ if err != nil {
+ raw.Close()
+ return nil, err
+ }
+
+ if info.Type == RawTracepointType {
+ // Sadness upon sadness: a Tracing program with AttachRawTp returns
+ // a raw_tracepoint link. Other types return a tracing link.
+ return &rawTracepoint{raw}, nil
+ }
+ return &tracing{raw}, nil
+}
+
+// AttachTracing links a tracing (fentry/fexit/fmod_ret) BPF program or
+// a BTF-powered raw tracepoint (tp_btf) BPF Program to a BPF hook defined
+// in kernel modules.
+func AttachTracing(opts TracingOptions) (Link, error) {
+ if t := opts.Program.Type(); t != ebpf.Tracing {
+ return nil, fmt.Errorf("invalid program type %s, expected Tracing", t)
+ }
+
+ switch opts.AttachType {
+ case ebpf.AttachTraceFEntry, ebpf.AttachTraceFExit, ebpf.AttachModifyReturn,
+ ebpf.AttachTraceRawTp, ebpf.AttachNone:
+ default:
+ return nil, fmt.Errorf("invalid attach type: %s", opts.AttachType.String())
+ }
+
+ return attachBTFID(opts.Program, opts.AttachType, opts.Cookie)
+}
+
+// AttachLSM links a Linux security module (LSM) BPF Program to a BPF
+// hook defined in kernel modules.
+func AttachLSM(opts LSMOptions) (Link, error) {
+ if t := opts.Program.Type(); t != ebpf.LSM {
+ return nil, fmt.Errorf("invalid program type %s, expected LSM", t)
+ }
+
+ return attachBTFID(opts.Program, ebpf.AttachLSMMac, opts.Cookie)
+}
diff --git a/vendor/github.com/cilium/ebpf/link/uprobe.go b/vendor/github.com/cilium/ebpf/link/uprobe.go
new file mode 100644
index 000000000..83977e0e5
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/uprobe.go
@@ -0,0 +1,331 @@
+package link
+
+import (
+ "debug/elf"
+ "errors"
+ "fmt"
+ "os"
+ "sync"
+
+ "github.com/cilium/ebpf"
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/tracefs"
+)
+
+var (
+ uprobeRefCtrOffsetPMUPath = "/sys/bus/event_source/devices/uprobe/format/ref_ctr_offset"
+ // elixir.bootlin.com/linux/v5.15-rc7/source/kernel/events/core.c#L9799
+ uprobeRefCtrOffsetShift = 32
+ haveRefCtrOffsetPMU = internal.NewFeatureTest("RefCtrOffsetPMU", "4.20", func() error {
+ _, err := os.Stat(uprobeRefCtrOffsetPMUPath)
+ if errors.Is(err, os.ErrNotExist) {
+ return internal.ErrNotSupported
+ }
+ if err != nil {
+ return err
+ }
+ return nil
+ })
+
+ // ErrNoSymbol indicates that the given symbol was not found
+ // in the ELF symbols table.
+ ErrNoSymbol = errors.New("not found")
+)
+
+// Executable defines an executable program on the filesystem.
+type Executable struct {
+ // Path of the executable on the filesystem.
+ path string
+ // Parsed ELF and dynamic symbols' addresses.
+ addresses map[string]uint64
+ // Keep track of symbol table lazy load.
+ addressesOnce sync.Once
+}
+
+// UprobeOptions defines additional parameters that will be used
+// when loading Uprobes.
+type UprobeOptions struct {
+ // Symbol address. Must be provided in case of external symbols (shared libs).
+ // If set, overrides the address eventually parsed from the executable.
+ Address uint64
+ // The offset relative to given symbol. Useful when tracing an arbitrary point
+ // inside the frame of given symbol.
+ //
+ // Note: this field changed from being an absolute offset to being relative
+ // to Address.
+ Offset uint64
+ // Only set the uprobe on the given process ID. Useful when tracing
+ // shared library calls or programs that have many running instances.
+ PID int
+ // Automatically manage SDT reference counts (semaphores).
+ //
+ // If this field is set, the Kernel will increment/decrement the
+ // semaphore located in the process memory at the provided address on
+ // probe attach/detach.
+ //
+ // See also:
+ // sourceware.org/systemtap/wiki/UserSpaceProbeImplementation (Semaphore Handling)
+ // github.com/torvalds/linux/commit/1cc33161a83d
+ // github.com/torvalds/linux/commit/a6ca88b241d5
+ RefCtrOffset uint64
+ // Arbitrary value that can be fetched from an eBPF program
+ // via `bpf_get_attach_cookie()`.
+ //
+ // Needs kernel 5.15+.
+ Cookie uint64
+ // Prefix used for the event name if the uprobe must be attached using tracefs.
+	// The group name will be formatted as `<prefix>_<randomstring>`.
+ // The default empty string is equivalent to "ebpf" as the prefix.
+ TraceFSPrefix string
+}
+
+func (uo *UprobeOptions) cookie() uint64 {
+ if uo == nil {
+ return 0
+ }
+ return uo.Cookie
+}
+
+// OpenExecutable opens an executable or shared object at the given path. Example:
+//
+// OpenExecutable("/bin/bash")
+//
+// The returned value can then be used to open Uprobe(s).
+func OpenExecutable(path string) (*Executable, error) {
+ if path == "" {
+ return nil, fmt.Errorf("path cannot be empty")
+ }
+
+ f, err := internal.OpenSafeELFFile(path)
+ if err != nil {
+ return nil, fmt.Errorf("parse ELF file: %w", err)
+ }
+ defer f.Close()
+
+ if f.Type != elf.ET_EXEC && f.Type != elf.ET_DYN {
+ // ELF is not an executable or a shared object.
+ return nil, errors.New("the given file is not an executable or a shared object")
+ }
+
+ return &Executable{
+ path: path,
+ addresses: make(map[string]uint64),
+ }, nil
+}
+
+func (ex *Executable) load(f *internal.SafeELFFile) error {
+ syms, err := f.Symbols()
+ if err != nil && !errors.Is(err, elf.ErrNoSymbols) {
+ return err
+ }
+
+ dynsyms, err := f.DynamicSymbols()
+ if err != nil && !errors.Is(err, elf.ErrNoSymbols) {
+ return err
+ }
+
+ syms = append(syms, dynsyms...)
+
+ for _, s := range syms {
+ if elf.ST_TYPE(s.Info) != elf.STT_FUNC {
+ // Symbol not associated with a function or other executable code.
+ continue
+ }
+
+ address := s.Value
+
+ // Loop over ELF segments.
+ for _, prog := range f.Progs {
+ // Skip uninteresting segments.
+ if prog.Type != elf.PT_LOAD || (prog.Flags&elf.PF_X) == 0 {
+ continue
+ }
+
+ if prog.Vaddr <= s.Value && s.Value < (prog.Vaddr+prog.Memsz) {
+ // If the symbol value is contained in the segment, calculate
+ // the symbol offset.
+ //
+ // fn symbol offset = fn symbol VA - .text VA + .text offset
+ //
+ // stackoverflow.com/a/40249502
+ address = s.Value - prog.Vaddr + prog.Off
+ break
+ }
+ }
+
+ ex.addresses[s.Name] = address
+ }
+
+ return nil
+}
+
+// address calculates the address of a symbol in the executable.
+//
+// opts must not be nil.
+func (ex *Executable) address(symbol string, opts *UprobeOptions) (uint64, error) {
+ if opts.Address > 0 {
+ return opts.Address + opts.Offset, nil
+ }
+
+ var err error
+ ex.addressesOnce.Do(func() {
+ var f *internal.SafeELFFile
+ f, err = internal.OpenSafeELFFile(ex.path)
+ if err != nil {
+ err = fmt.Errorf("parse ELF file: %w", err)
+ return
+ }
+ defer f.Close()
+
+ err = ex.load(f)
+ })
+ if err != nil {
+ return 0, fmt.Errorf("lazy load symbols: %w", err)
+ }
+
+ address, ok := ex.addresses[symbol]
+ if !ok {
+ return 0, fmt.Errorf("symbol %s: %w", symbol, ErrNoSymbol)
+ }
+
+ // Symbols with location 0 from section undef are shared library calls and
+ // are relocated before the binary is executed. Dynamic linking is not
+ // implemented by the library, so mark this as unsupported for now.
+ //
+ // Since only offset values are stored and not elf.Symbol, if the value is 0,
+ // assume it's an external symbol.
+ if address == 0 {
+ return 0, fmt.Errorf("cannot resolve %s library call '%s': %w "+
+ "(consider providing UprobeOptions.Address)", ex.path, symbol, ErrNotSupported)
+ }
+
+ return address + opts.Offset, nil
+}
+
+// Uprobe attaches the given eBPF program to a perf event that fires when the
+// given symbol starts executing in the given Executable.
+// For example, /bin/bash::main():
+//
+// ex, _ = OpenExecutable("/bin/bash")
+// ex.Uprobe("main", prog, nil)
+//
+// When using symbols which belong to shared libraries,
+// an offset must be provided via options:
+//
+// up, err := ex.Uprobe("main", prog, &UprobeOptions{Offset: 0x123})
+//
+// Note: Setting the Offset field in the options supersedes the symbol's offset.
+//
+// Losing the reference to the resulting Link (up) will close the Uprobe
+// and prevent further execution of prog. The Link must be Closed during
+// program shutdown to avoid leaking system resources.
+//
+// Functions provided by shared libraries can currently not be traced and
+// will result in an ErrNotSupported.
+func (ex *Executable) Uprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions) (Link, error) {
+ u, err := ex.uprobe(symbol, prog, opts, false)
+ if err != nil {
+ return nil, err
+ }
+
+ lnk, err := attachPerfEvent(u, prog, opts.cookie())
+ if err != nil {
+ u.Close()
+ return nil, err
+ }
+
+ return lnk, nil
+}
+
+// Uretprobe attaches the given eBPF program to a perf event that fires right
+// before the given symbol exits. For example, /bin/bash::main():
+//
+// ex, _ = OpenExecutable("/bin/bash")
+// ex.Uretprobe("main", prog, nil)
+//
+// When using symbols which belong to shared libraries,
+// an offset must be provided via options:
+//
+// up, err := ex.Uretprobe("main", prog, &UprobeOptions{Offset: 0x123})
+//
+// Note: Setting the Offset field in the options supersedes the symbol's offset.
+//
+// Losing the reference to the resulting Link (up) will close the Uprobe
+// and prevent further execution of prog. The Link must be Closed during
+// program shutdown to avoid leaking system resources.
+//
+// Functions provided by shared libraries can currently not be traced and
+// will result in an ErrNotSupported.
+func (ex *Executable) Uretprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions) (Link, error) {
+ u, err := ex.uprobe(symbol, prog, opts, true)
+ if err != nil {
+ return nil, err
+ }
+
+ lnk, err := attachPerfEvent(u, prog, opts.cookie())
+ if err != nil {
+ u.Close()
+ return nil, err
+ }
+
+ return lnk, nil
+}
+
+// uprobe opens a perf event for the given binary/symbol and attaches prog to it.
+// If ret is true, create a uretprobe.
+func (ex *Executable) uprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions, ret bool) (*perfEvent, error) {
+ if prog == nil {
+ return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput)
+ }
+ if prog.Type() != ebpf.Kprobe {
+ return nil, fmt.Errorf("eBPF program type %s is not Kprobe: %w", prog.Type(), errInvalidInput)
+ }
+ if opts == nil {
+ opts = &UprobeOptions{}
+ }
+
+ offset, err := ex.address(symbol, opts)
+ if err != nil {
+ return nil, err
+ }
+
+ pid := opts.PID
+ if pid == 0 {
+ pid = perfAllThreads
+ }
+
+ if opts.RefCtrOffset != 0 {
+ if err := haveRefCtrOffsetPMU(); err != nil {
+ return nil, fmt.Errorf("uprobe ref_ctr_offset: %w", err)
+ }
+ }
+
+ args := tracefs.ProbeArgs{
+ Type: tracefs.Uprobe,
+ Symbol: symbol,
+ Path: ex.path,
+ Offset: offset,
+ Pid: pid,
+ RefCtrOffset: opts.RefCtrOffset,
+ Ret: ret,
+ Cookie: opts.Cookie,
+ Group: opts.TraceFSPrefix,
+ }
+
+ // Use uprobe PMU if the kernel has it available.
+ tp, err := pmuProbe(args)
+ if err == nil {
+ return tp, nil
+ }
+ if err != nil && !errors.Is(err, ErrNotSupported) {
+ return nil, fmt.Errorf("creating perf_uprobe PMU: %w", err)
+ }
+
+ // Use tracefs if uprobe PMU is missing.
+ tp, err = tracefsProbe(args)
+ if err != nil {
+ return nil, fmt.Errorf("creating trace event '%s:%s' in tracefs: %w", ex.path, symbol, err)
+ }
+
+ return tp, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/link/xdp.go b/vendor/github.com/cilium/ebpf/link/xdp.go
new file mode 100644
index 000000000..aa8dd3a4c
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/link/xdp.go
@@ -0,0 +1,54 @@
+package link
+
+import (
+ "fmt"
+
+ "github.com/cilium/ebpf"
+)
+
+// XDPAttachFlags represents how XDP program will be attached to interface.
+type XDPAttachFlags uint32
+
+const (
+ // XDPGenericMode (SKB) links XDP BPF program for drivers which do
+ // not yet support native XDP.
+ XDPGenericMode XDPAttachFlags = 1 << (iota + 1)
+ // XDPDriverMode links XDP BPF program into the driver’s receive path.
+ XDPDriverMode
+ // XDPOffloadMode offloads the entire XDP BPF program into hardware.
+ XDPOffloadMode
+)
+
+type XDPOptions struct {
+ // Program must be an XDP BPF program.
+ Program *ebpf.Program
+
+ // Interface is the interface index to attach program to.
+ Interface int
+
+ // Flags is one of XDPAttachFlags (optional).
+ //
+ // Only one XDP mode should be set, without flag defaults
+ // to driver/generic mode (best effort).
+ Flags XDPAttachFlags
+}
+
+// AttachXDP links an XDP BPF program to an XDP hook.
+func AttachXDP(opts XDPOptions) (Link, error) {
+ if t := opts.Program.Type(); t != ebpf.XDP {
+ return nil, fmt.Errorf("invalid program type %s, expected XDP", t)
+ }
+
+ if opts.Interface < 1 {
+ return nil, fmt.Errorf("invalid interface index: %d", opts.Interface)
+ }
+
+ rawLink, err := AttachRawLink(RawLinkOptions{
+ Program: opts.Program,
+ Attach: ebpf.AttachXDP,
+ Target: opts.Interface,
+ Flags: uint32(opts.Flags),
+ })
+
+ return rawLink, err
+}
diff --git a/vendor/github.com/cilium/ebpf/linker.go b/vendor/github.com/cilium/ebpf/linker.go
new file mode 100644
index 000000000..b653b805e
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/linker.go
@@ -0,0 +1,401 @@
+package ebpf
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+
+ "github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/btf"
+ "github.com/cilium/ebpf/internal"
+)
+
+// handles stores handle objects to avoid gc cleanup
+type handles []*btf.Handle
+
+func (hs *handles) add(h *btf.Handle) (int, error) {
+ if h == nil {
+ return 0, nil
+ }
+
+ if len(*hs) == math.MaxInt16 {
+ return 0, fmt.Errorf("can't add more than %d module FDs to fdArray", math.MaxInt16)
+ }
+
+ *hs = append(*hs, h)
+
+ // return length of slice so that indexes start at 1
+ return len(*hs), nil
+}
+
+func (hs handles) fdArray() []int32 {
+ // first element of fda is reserved as no module can be indexed with 0
+ fda := []int32{0}
+ for _, h := range hs {
+ fda = append(fda, int32(h.FD()))
+ }
+
+ return fda
+}
+
+func (hs *handles) Close() error {
+ var errs []error
+ for _, h := range *hs {
+ errs = append(errs, h.Close())
+ }
+ return errors.Join(errs...)
+}
+
+// splitSymbols splits insns into subsections delimited by Symbol Instructions.
+// insns cannot be empty and must start with a Symbol Instruction.
+//
+// The resulting map is indexed by Symbol name.
+func splitSymbols(insns asm.Instructions) (map[string]asm.Instructions, error) {
+ if len(insns) == 0 {
+ return nil, errors.New("insns is empty")
+ }
+
+ if insns[0].Symbol() == "" {
+ return nil, errors.New("insns must start with a Symbol")
+ }
+
+ var name string
+ progs := make(map[string]asm.Instructions)
+ for _, ins := range insns {
+ if sym := ins.Symbol(); sym != "" {
+ if progs[sym] != nil {
+ return nil, fmt.Errorf("insns contains duplicate Symbol %s", sym)
+ }
+ name = sym
+ }
+
+ progs[name] = append(progs[name], ins)
+ }
+
+ return progs, nil
+}
+
+// The linker is responsible for resolving bpf-to-bpf calls between programs
+// within an ELF. Each BPF program must be a self-contained binary blob,
+// so when an instruction in one ELF program section wants to jump to
+// a function in another, the linker needs to pull in the bytecode
+// (and BTF info) of the target function and concatenate the instruction
+// streams.
+//
+// Later on in the pipeline, all call sites are fixed up with relative jumps
+// within this newly-created instruction stream to then finally hand off to
+// the kernel with BPF_PROG_LOAD.
+//
+// Each function is denoted by an ELF symbol and the compiler takes care of
+// register setup before each jump instruction.
+
+// hasFunctionReferences returns true if insns contains one or more bpf2bpf
+// function references.
+func hasFunctionReferences(insns asm.Instructions) bool {
+ for _, i := range insns {
+ if i.IsFunctionReference() {
+ return true
+ }
+ }
+ return false
+}
+
+// applyRelocations collects and applies any CO-RE relocations in insns.
+//
+// Passing a nil target will relocate against the running kernel. insns are
+// modified in place.
+func applyRelocations(insns asm.Instructions, target *btf.Spec, bo binary.ByteOrder) error {
+ var relos []*btf.CORERelocation
+ var reloInsns []*asm.Instruction
+ iter := insns.Iterate()
+ for iter.Next() {
+ if relo := btf.CORERelocationMetadata(iter.Ins); relo != nil {
+ relos = append(relos, relo)
+ reloInsns = append(reloInsns, iter.Ins)
+ }
+ }
+
+ if len(relos) == 0 {
+ return nil
+ }
+
+ if bo == nil {
+ bo = internal.NativeEndian
+ }
+
+ fixups, err := btf.CORERelocate(relos, target, bo)
+ if err != nil {
+ return err
+ }
+
+ for i, fixup := range fixups {
+ if err := fixup.Apply(reloInsns[i]); err != nil {
+ return fmt.Errorf("fixup for %s: %w", relos[i], err)
+ }
+ }
+
+ return nil
+}
+
+// flattenPrograms resolves bpf-to-bpf calls for a set of programs.
+//
+// Links all programs in names by modifying their ProgramSpec in progs.
+func flattenPrograms(progs map[string]*ProgramSpec, names []string) {
+ // Pre-calculate all function references.
+ refs := make(map[*ProgramSpec][]string)
+ for _, prog := range progs {
+ refs[prog] = prog.Instructions.FunctionReferences()
+ }
+
+ // Create a flattened instruction stream, but don't modify progs yet to
+ // avoid linking multiple times.
+ flattened := make([]asm.Instructions, 0, len(names))
+ for _, name := range names {
+ flattened = append(flattened, flattenInstructions(name, progs, refs))
+ }
+
+ // Finally, assign the flattened instructions.
+ for i, name := range names {
+ progs[name].Instructions = flattened[i]
+ }
+}
+
+// flattenInstructions resolves bpf-to-bpf calls for a single program.
+//
+// Flattens the instructions of prog by concatenating the instructions of all
+// direct and indirect dependencies.
+//
+// progs contains all referenceable programs, while refs contain the direct
+// dependencies of each program.
+func flattenInstructions(name string, progs map[string]*ProgramSpec, refs map[*ProgramSpec][]string) asm.Instructions {
+ prog := progs[name]
+
+ insns := make(asm.Instructions, len(prog.Instructions))
+ copy(insns, prog.Instructions)
+
+ // Add all direct references of prog to the list of to be linked programs.
+ pending := make([]string, len(refs[prog]))
+ copy(pending, refs[prog])
+
+ // All references for which we've appended instructions.
+ linked := make(map[string]bool)
+
+ // Iterate all pending references. We can't use a range since pending is
+ // modified in the body below.
+ for len(pending) > 0 {
+ var ref string
+ ref, pending = pending[0], pending[1:]
+
+ if linked[ref] {
+ // We've already linked this ref, don't append instructions again.
+ continue
+ }
+
+ progRef := progs[ref]
+ if progRef == nil {
+ // We don't have instructions that go with this reference. This
+ // happens when calling extern functions.
+ continue
+ }
+
+ insns = append(insns, progRef.Instructions...)
+ linked[ref] = true
+
+ // Make sure we link indirect references.
+ pending = append(pending, refs[progRef]...)
+ }
+
+ return insns
+}
+
+// fixupAndValidate is called by the ELF reader right before marshaling the
+// instruction stream. It performs last-minute adjustments to the program and
+// runs some sanity checks before sending it off to the kernel.
+func fixupAndValidate(insns asm.Instructions) error {
+ iter := insns.Iterate()
+ for iter.Next() {
+ ins := iter.Ins
+
+ // Map load was tagged with a Reference, but does not contain a Map pointer.
+ needsMap := ins.Reference() != "" || ins.Metadata.Get(kconfigMetaKey{}) != nil
+ if ins.IsLoadFromMap() && needsMap && ins.Map() == nil {
+ return fmt.Errorf("instruction %d: %w", iter.Index, asm.ErrUnsatisfiedMapReference)
+ }
+
+ fixupProbeReadKernel(ins)
+ }
+
+ return nil
+}
+
+// fixupKfuncs loops over all instructions in search for kfunc calls.
+// If at least one is found, the current kernel's BTF and module BTFs are searched to set Instruction.Constant
+// and Instruction.Offset to the correct values.
+func fixupKfuncs(insns asm.Instructions) (_ handles, err error) {
+ closeOnError := func(c io.Closer) {
+ if err != nil {
+ c.Close()
+ }
+ }
+
+ iter := insns.Iterate()
+ for iter.Next() {
+ ins := iter.Ins
+ if ins.IsKfuncCall() {
+ goto fixups
+ }
+ }
+
+ return nil, nil
+
+fixups:
+ // only load the kernel spec if we found at least one kfunc call
+ kernelSpec, err := btf.LoadKernelSpec()
+ if err != nil {
+ return nil, err
+ }
+
+ fdArray := make(handles, 0)
+ defer closeOnError(&fdArray)
+
+ for {
+ ins := iter.Ins
+
+ if !ins.IsKfuncCall() {
+ if !iter.Next() {
+ // break loop if this was the last instruction in the stream.
+ break
+ }
+ continue
+ }
+
+ // check meta, if no meta return err
+ kfm, _ := ins.Metadata.Get(kfuncMeta{}).(*btf.Func)
+ if kfm == nil {
+ return nil, fmt.Errorf("kfunc call has no kfuncMeta")
+ }
+
+ target := btf.Type((*btf.Func)(nil))
+ spec, module, err := findTargetInKernel(kernelSpec, kfm.Name, &target)
+ if errors.Is(err, btf.ErrNotFound) {
+ return nil, fmt.Errorf("kfunc %q: %w", kfm.Name, ErrNotSupported)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ idx, err := fdArray.add(module)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := btf.CheckTypeCompatibility(kfm.Type, target.(*btf.Func).Type); err != nil {
+ return nil, &incompatibleKfuncError{kfm.Name, err}
+ }
+
+ id, err := spec.TypeID(target)
+ if err != nil {
+ return nil, err
+ }
+
+ ins.Constant = int64(id)
+ ins.Offset = int16(idx)
+
+ if !iter.Next() {
+ break
+ }
+ }
+
+ return fdArray, nil
+}
+
+type incompatibleKfuncError struct {
+ name string
+ err error
+}
+
+func (ike *incompatibleKfuncError) Error() string {
+ return fmt.Sprintf("kfunc %q: %s", ike.name, ike.err)
+}
+
+// fixupProbeReadKernel replaces calls to bpf_probe_read_{kernel,user}(_str)
+// with bpf_probe_read(_str) on kernels that don't support it yet.
+func fixupProbeReadKernel(ins *asm.Instruction) {
+ if !ins.IsBuiltinCall() {
+ return
+ }
+
+ // Kernel supports bpf_probe_read_kernel, nothing to do.
+ if haveProbeReadKernel() == nil {
+ return
+ }
+
+ switch asm.BuiltinFunc(ins.Constant) {
+ case asm.FnProbeReadKernel, asm.FnProbeReadUser:
+ ins.Constant = int64(asm.FnProbeRead)
+ case asm.FnProbeReadKernelStr, asm.FnProbeReadUserStr:
+ ins.Constant = int64(asm.FnProbeReadStr)
+ }
+}
+
+// resolveKconfigReferences creates and populates a .kconfig map if necessary.
+//
+// Returns a nil Map and no error if no references exist.
+func resolveKconfigReferences(insns asm.Instructions) (_ *Map, err error) {
+ closeOnError := func(c io.Closer) {
+ if err != nil {
+ c.Close()
+ }
+ }
+
+ var spec *MapSpec
+ iter := insns.Iterate()
+ for iter.Next() {
+ meta, _ := iter.Ins.Metadata.Get(kconfigMetaKey{}).(*kconfigMeta)
+ if meta != nil {
+ spec = meta.Map
+ break
+ }
+ }
+
+ if spec == nil {
+ return nil, nil
+ }
+
+ cpy := spec.Copy()
+ if err := resolveKconfig(cpy); err != nil {
+ return nil, err
+ }
+
+ kconfig, err := NewMap(cpy)
+ if err != nil {
+ return nil, err
+ }
+ defer closeOnError(kconfig)
+
+ // Resolve all instructions which load from .kconfig map with actual map
+ // and offset inside it.
+ iter = insns.Iterate()
+ for iter.Next() {
+ meta, _ := iter.Ins.Metadata.Get(kconfigMetaKey{}).(*kconfigMeta)
+ if meta == nil {
+ continue
+ }
+
+ if meta.Map != spec {
+ return nil, fmt.Errorf("instruction %d: reference to multiple .kconfig maps is not allowed", iter.Index)
+ }
+
+ if err := iter.Ins.AssociateMap(kconfig); err != nil {
+ return nil, fmt.Errorf("instruction %d: %w", iter.Index, err)
+ }
+
+ // Encode a map read at the offset of the var in the datasec.
+ iter.Ins.Constant = int64(uint64(meta.Offset) << 32)
+ iter.Ins.Metadata.Set(kconfigMetaKey{}, nil)
+ }
+
+ return kconfig, nil
+}
diff --git a/vendor/github.com/cilium/ebpf/map.go b/vendor/github.com/cilium/ebpf/map.go
new file mode 100644
index 000000000..be732a24f
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/map.go
@@ -0,0 +1,1480 @@
+package ebpf
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "math/rand"
+ "os"
+ "path/filepath"
+ "reflect"
+ "time"
+ "unsafe"
+
+ "github.com/cilium/ebpf/btf"
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/sysenc"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// Errors returned by Map and MapIterator methods.
+var (
+ ErrKeyNotExist = errors.New("key does not exist")
+ ErrKeyExist = errors.New("key already exists")
+ ErrIterationAborted = errors.New("iteration aborted")
+ ErrMapIncompatible = errors.New("map spec is incompatible with existing map")
+ errMapNoBTFValue = errors.New("map spec does not contain a BTF Value")
+)
+
+// MapOptions control loading a map into the kernel.
+type MapOptions struct {
+ // The base path to pin maps in if requested via PinByName.
+ // Existing maps will be re-used if they are compatible, otherwise an
+ // error is returned.
+ PinPath string
+ LoadPinOptions LoadPinOptions
+}
+
+// MapID represents the unique ID of an eBPF map
+type MapID uint32
+
+// MapSpec defines a Map.
+type MapSpec struct {
+ // Name is passed to the kernel as a debug aid. Must only contain
+ // alpha numeric and '_' characters.
+ Name string
+ Type MapType
+ KeySize uint32
+ ValueSize uint32
+ MaxEntries uint32
+
+ // Flags is passed to the kernel and specifies additional map
+ // creation attributes.
+ Flags uint32
+
+ // Automatically pin and load a map from MapOptions.PinPath.
+ // Generates an error if an existing pinned map is incompatible with the MapSpec.
+ Pinning PinType
+
+ // Specify numa node during map creation
+ // (effective only if unix.BPF_F_NUMA_NODE flag is set,
+ // which can be imported from golang.org/x/sys/unix)
+ NumaNode uint32
+
+ // The initial contents of the map. May be nil.
+ Contents []MapKV
+
+ // Whether to freeze a map after setting its initial contents.
+ Freeze bool
+
+ // InnerMap is used as a template for ArrayOfMaps and HashOfMaps
+ InnerMap *MapSpec
+
+ // Extra trailing bytes found in the ELF map definition when using structs
+ // larger than libbpf's bpf_map_def. nil if no trailing bytes were present.
+ // Must be nil or empty before instantiating the MapSpec into a Map.
+ Extra *bytes.Reader
+
+ // The key and value type of this map. May be nil.
+ Key, Value btf.Type
+}
+
+func (ms *MapSpec) String() string {
+ return fmt.Sprintf("%s(keySize=%d, valueSize=%d, maxEntries=%d, flags=%d)", ms.Type, ms.KeySize, ms.ValueSize, ms.MaxEntries, ms.Flags)
+}
+
+// Copy returns a copy of the spec.
+//
+// MapSpec.Contents is a shallow copy.
+func (ms *MapSpec) Copy() *MapSpec {
+ if ms == nil {
+ return nil
+ }
+
+ cpy := *ms
+
+ cpy.Contents = make([]MapKV, len(ms.Contents))
+ copy(cpy.Contents, ms.Contents)
+
+ cpy.InnerMap = ms.InnerMap.Copy()
+
+ return &cpy
+}
+
+// fixupMagicFields fills fields of MapSpec which are usually
+// left empty in ELF or which depend on runtime information.
+//
+// The method doesn't modify Spec, instead returning a copy.
+// The copy is only performed if fixups are necessary, so callers mustn't mutate
+// the returned spec.
+func (spec *MapSpec) fixupMagicFields() (*MapSpec, error) {
+ switch spec.Type {
+ case ArrayOfMaps, HashOfMaps:
+ if spec.ValueSize != 0 && spec.ValueSize != 4 {
+ return nil, errors.New("ValueSize must be zero or four for map of map")
+ }
+
+ spec = spec.Copy()
+ spec.ValueSize = 4
+
+ case PerfEventArray:
+ if spec.KeySize != 0 && spec.KeySize != 4 {
+ return nil, errors.New("KeySize must be zero or four for perf event array")
+ }
+
+ if spec.ValueSize != 0 && spec.ValueSize != 4 {
+ return nil, errors.New("ValueSize must be zero or four for perf event array")
+ }
+
+ spec = spec.Copy()
+ spec.KeySize = 4
+ spec.ValueSize = 4
+
+ n, err := internal.PossibleCPUs()
+ if err != nil {
+ return nil, fmt.Errorf("fixup perf event array: %w", err)
+ }
+
+ if n := uint32(n); spec.MaxEntries == 0 || spec.MaxEntries > n {
+ // MaxEntries should be zero most of the time, but there is code
+ // out there which hardcodes large constants. Clamp the number
+ // of entries to the number of CPUs at most. Allow creating maps with
+ // less than n items since some kernel selftests relied on this
+ // behaviour in the past.
+ spec.MaxEntries = n
+ }
+ }
+
+ return spec, nil
+}
+
+// dataSection returns the contents and BTF Datasec descriptor of the spec.
+func (ms *MapSpec) dataSection() ([]byte, *btf.Datasec, error) {
+ if ms.Value == nil {
+ return nil, nil, errMapNoBTFValue
+ }
+
+ ds, ok := ms.Value.(*btf.Datasec)
+ if !ok {
+ return nil, nil, fmt.Errorf("map value BTF is a %T, not a *btf.Datasec", ms.Value)
+ }
+
+ if n := len(ms.Contents); n != 1 {
+ return nil, nil, fmt.Errorf("expected one key, found %d", n)
+ }
+
+ kv := ms.Contents[0]
+ value, ok := kv.Value.([]byte)
+ if !ok {
+ return nil, nil, fmt.Errorf("value at first map key is %T, not []byte", kv.Value)
+ }
+
+ return value, ds, nil
+}
+
+// MapKV is used to initialize the contents of a Map.
+type MapKV struct {
+ Key interface{}
+ Value interface{}
+}
+
+// Compatible returns nil if an existing map may be used instead of creating
+// one from the spec.
+//
+// Returns an error wrapping [ErrMapIncompatible] otherwise.
+func (ms *MapSpec) Compatible(m *Map) error {
+ ms, err := ms.fixupMagicFields()
+ if err != nil {
+ return err
+ }
+
+ switch {
+ case m.typ != ms.Type:
+ return fmt.Errorf("expected type %v, got %v: %w", ms.Type, m.typ, ErrMapIncompatible)
+
+ case m.keySize != ms.KeySize:
+ return fmt.Errorf("expected key size %v, got %v: %w", ms.KeySize, m.keySize, ErrMapIncompatible)
+
+ case m.valueSize != ms.ValueSize:
+ return fmt.Errorf("expected value size %v, got %v: %w", ms.ValueSize, m.valueSize, ErrMapIncompatible)
+
+ case m.maxEntries != ms.MaxEntries:
+ return fmt.Errorf("expected max entries %v, got %v: %w", ms.MaxEntries, m.maxEntries, ErrMapIncompatible)
+
+ // BPF_F_RDONLY_PROG is set unconditionally for devmaps. Explicitly allow
+ // this mismatch.
+ case !((ms.Type == DevMap || ms.Type == DevMapHash) && m.flags^ms.Flags == unix.BPF_F_RDONLY_PROG) &&
+ m.flags != ms.Flags:
+ return fmt.Errorf("expected flags %v, got %v: %w", ms.Flags, m.flags, ErrMapIncompatible)
+ }
+ return nil
+}
+
+// Map represents a Map file descriptor.
+//
+// It is not safe to close a map which is used by other goroutines.
+//
+// Methods which take interface{} arguments by default encode
+// them using binary.Read/Write in the machine's native endianness.
+//
+// Implement encoding.BinaryMarshaler or encoding.BinaryUnmarshaler
+// if you require custom encoding.
+type Map struct {
+ name string
+ fd *sys.FD
+ typ MapType
+ keySize uint32
+ valueSize uint32
+ maxEntries uint32
+ flags uint32
+ pinnedPath string
+ // Per CPU maps return values larger than the size in the spec
+ fullValueSize int
+}
+
+// NewMapFromFD creates a map from a raw fd.
+//
+// You should not use fd after calling this function.
+func NewMapFromFD(fd int) (*Map, error) {
+ f, err := sys.NewFD(fd)
+ if err != nil {
+ return nil, err
+ }
+
+ return newMapFromFD(f)
+}
+
+func newMapFromFD(fd *sys.FD) (*Map, error) {
+ info, err := newMapInfoFromFd(fd)
+ if err != nil {
+ fd.Close()
+ return nil, fmt.Errorf("get map info: %w", err)
+ }
+
+ return newMap(fd, info.Name, info.Type, info.KeySize, info.ValueSize, info.MaxEntries, info.Flags)
+}
+
+// NewMap creates a new Map.
+//
+// It's equivalent to calling NewMapWithOptions with default options.
+func NewMap(spec *MapSpec) (*Map, error) {
+ return NewMapWithOptions(spec, MapOptions{})
+}
+
+// NewMapWithOptions creates a new Map.
+//
+// Creating a map for the first time will perform feature detection
+// by creating small, temporary maps.
+//
+// The caller is responsible for ensuring the process' rlimit is set
+// sufficiently high for locking memory during map creation. This can be done
+// by calling rlimit.RemoveMemlock() prior to calling NewMapWithOptions.
+//
+// May return an error wrapping ErrMapIncompatible.
+func NewMapWithOptions(spec *MapSpec, opts MapOptions) (*Map, error) {
+ m, err := newMapWithOptions(spec, opts)
+ if err != nil {
+ return nil, fmt.Errorf("creating map: %w", err)
+ }
+
+ if err := m.finalize(spec); err != nil {
+ m.Close()
+ return nil, fmt.Errorf("populating map: %w", err)
+ }
+
+ return m, nil
+}
+
+func newMapWithOptions(spec *MapSpec, opts MapOptions) (_ *Map, err error) {
+ closeOnError := func(c io.Closer) {
+ if err != nil {
+ c.Close()
+ }
+ }
+
+ switch spec.Pinning {
+ case PinByName:
+ if spec.Name == "" {
+ return nil, fmt.Errorf("pin by name: missing Name")
+ }
+
+ if opts.PinPath == "" {
+ return nil, fmt.Errorf("pin by name: missing MapOptions.PinPath")
+ }
+
+ path := filepath.Join(opts.PinPath, spec.Name)
+ m, err := LoadPinnedMap(path, &opts.LoadPinOptions)
+ if errors.Is(err, unix.ENOENT) {
+ break
+ }
+ if err != nil {
+ return nil, fmt.Errorf("load pinned map: %w", err)
+ }
+ defer closeOnError(m)
+
+ if err := spec.Compatible(m); err != nil {
+ return nil, fmt.Errorf("use pinned map %s: %w", spec.Name, err)
+ }
+
+ return m, nil
+
+ case PinNone:
+ // Nothing to do here
+
+ default:
+ return nil, fmt.Errorf("pin type %d: %w", int(spec.Pinning), ErrNotSupported)
+ }
+
+ var innerFd *sys.FD
+ if spec.Type == ArrayOfMaps || spec.Type == HashOfMaps {
+ if spec.InnerMap == nil {
+ return nil, fmt.Errorf("%s requires InnerMap", spec.Type)
+ }
+
+ if spec.InnerMap.Pinning != PinNone {
+ return nil, errors.New("inner maps cannot be pinned")
+ }
+
+ template, err := spec.InnerMap.createMap(nil, opts)
+ if err != nil {
+ return nil, fmt.Errorf("inner map: %w", err)
+ }
+ defer template.Close()
+
+ // Intentionally skip populating and freezing (finalizing)
+ // the inner map template since it will be removed shortly.
+
+ innerFd = template.fd
+ }
+
+ m, err := spec.createMap(innerFd, opts)
+ if err != nil {
+ return nil, err
+ }
+ defer closeOnError(m)
+
+ if spec.Pinning == PinByName {
+ path := filepath.Join(opts.PinPath, spec.Name)
+ if err := m.Pin(path); err != nil {
+ return nil, fmt.Errorf("pin map to %s: %w", path, err)
+ }
+ }
+
+ return m, nil
+}
+
+// createMap validates the spec's properties and creates the map in the kernel
+// using the given opts. It does not populate or freeze the map.
+func (spec *MapSpec) createMap(inner *sys.FD, opts MapOptions) (_ *Map, err error) {
+ closeOnError := func(closer io.Closer) {
+ if err != nil {
+ closer.Close()
+ }
+ }
+
+ // Kernels 4.13 through 5.4 used a struct bpf_map_def that contained
+ // additional 'inner_map_idx' and later 'numa_node' fields.
+ // In order to support loading these definitions, tolerate the presence of
+ // extra bytes, but require them to be zeroes.
+ if spec.Extra != nil {
+ if _, err := io.Copy(internal.DiscardZeroes{}, spec.Extra); err != nil {
+ return nil, errors.New("extra contains unhandled non-zero bytes, drain before creating map")
+ }
+ }
+
+ spec, err = spec.fixupMagicFields()
+ if err != nil {
+ return nil, err
+ }
+
+ attr := sys.MapCreateAttr{
+ MapType: sys.MapType(spec.Type),
+ KeySize: spec.KeySize,
+ ValueSize: spec.ValueSize,
+ MaxEntries: spec.MaxEntries,
+ MapFlags: sys.MapFlags(spec.Flags),
+ NumaNode: spec.NumaNode,
+ }
+
+ if inner != nil {
+ attr.InnerMapFd = inner.Uint()
+ }
+
+ if haveObjName() == nil {
+ attr.MapName = sys.NewObjName(spec.Name)
+ }
+
+ if spec.Key != nil || spec.Value != nil {
+ handle, keyTypeID, valueTypeID, err := btf.MarshalMapKV(spec.Key, spec.Value)
+ if err != nil && !errors.Is(err, btf.ErrNotSupported) {
+ return nil, fmt.Errorf("load BTF: %w", err)
+ }
+
+ if handle != nil {
+ defer handle.Close()
+
+ // Use BTF k/v during map creation.
+ attr.BtfFd = uint32(handle.FD())
+ attr.BtfKeyTypeId = keyTypeID
+ attr.BtfValueTypeId = valueTypeID
+ }
+ }
+
+ fd, err := sys.MapCreate(&attr)
+
+ // Some map types don't support BTF k/v in earlier kernel versions.
+ // Remove BTF metadata and retry map creation.
+ if (errors.Is(err, sys.ENOTSUPP) || errors.Is(err, unix.EINVAL)) && attr.BtfFd != 0 {
+ attr.BtfFd, attr.BtfKeyTypeId, attr.BtfValueTypeId = 0, 0, 0
+ fd, err = sys.MapCreate(&attr)
+ }
+ if err != nil {
+ return nil, handleMapCreateError(attr, spec, err)
+ }
+
+ defer closeOnError(fd)
+ m, err := newMap(fd, spec.Name, spec.Type, spec.KeySize, spec.ValueSize, spec.MaxEntries, spec.Flags)
+ if err != nil {
+ return nil, fmt.Errorf("map create: %w", err)
+ }
+ return m, nil
+}
+
+func handleMapCreateError(attr sys.MapCreateAttr, spec *MapSpec, err error) error {
+ if errors.Is(err, unix.EPERM) {
+ return fmt.Errorf("map create: %w (MEMLOCK may be too low, consider rlimit.RemoveMemlock)", err)
+ }
+ if errors.Is(err, unix.EINVAL) && spec.MaxEntries == 0 {
+ return fmt.Errorf("map create: %w (MaxEntries may be incorrectly set to zero)", err)
+ }
+ if errors.Is(err, unix.EINVAL) && spec.Type == UnspecifiedMap {
+ return fmt.Errorf("map create: cannot use type %s", UnspecifiedMap)
+ }
+ if errors.Is(err, unix.EINVAL) && spec.Flags&unix.BPF_F_NO_PREALLOC > 0 {
+ return fmt.Errorf("map create: %w (noPrealloc flag may be incompatible with map type %s)", err, spec.Type)
+ }
+
+ switch spec.Type {
+ case ArrayOfMaps, HashOfMaps:
+ if haveFeatErr := haveNestedMaps(); haveFeatErr != nil {
+ return fmt.Errorf("map create: %w", haveFeatErr)
+ }
+ }
+ if spec.Flags&(unix.BPF_F_RDONLY_PROG|unix.BPF_F_WRONLY_PROG) > 0 || spec.Freeze {
+ if haveFeatErr := haveMapMutabilityModifiers(); haveFeatErr != nil {
+ return fmt.Errorf("map create: %w", haveFeatErr)
+ }
+ }
+ if spec.Flags&unix.BPF_F_MMAPABLE > 0 {
+ if haveFeatErr := haveMmapableMaps(); haveFeatErr != nil {
+ return fmt.Errorf("map create: %w", haveFeatErr)
+ }
+ }
+ if spec.Flags&unix.BPF_F_INNER_MAP > 0 {
+ if haveFeatErr := haveInnerMaps(); haveFeatErr != nil {
+ return fmt.Errorf("map create: %w", haveFeatErr)
+ }
+ }
+ if spec.Flags&unix.BPF_F_NO_PREALLOC > 0 {
+ if haveFeatErr := haveNoPreallocMaps(); haveFeatErr != nil {
+ return fmt.Errorf("map create: %w", haveFeatErr)
+ }
+ }
+ if attr.BtfFd == 0 {
+ return fmt.Errorf("map create: %w (without BTF k/v)", err)
+ }
+
+ return fmt.Errorf("map create: %w", err)
+}
+
+// newMap allocates and returns a new Map structure.
+// Sets the fullValueSize on per-CPU maps.
+func newMap(fd *sys.FD, name string, typ MapType, keySize, valueSize, maxEntries, flags uint32) (*Map, error) {
+ m := &Map{
+ name,
+ fd,
+ typ,
+ keySize,
+ valueSize,
+ maxEntries,
+ flags,
+ "",
+ int(valueSize),
+ }
+
+ if !typ.hasPerCPUValue() {
+ return m, nil
+ }
+
+ possibleCPUs, err := internal.PossibleCPUs()
+ if err != nil {
+ return nil, err
+ }
+
+ m.fullValueSize = int(internal.Align(valueSize, 8)) * possibleCPUs
+ return m, nil
+}
+
+func (m *Map) String() string {
+ if m.name != "" {
+ return fmt.Sprintf("%s(%s)#%v", m.typ, m.name, m.fd)
+ }
+ return fmt.Sprintf("%s#%v", m.typ, m.fd)
+}
+
+// Type returns the underlying type of the map.
+func (m *Map) Type() MapType {
+ return m.typ
+}
+
+// KeySize returns the size of the map key in bytes.
+func (m *Map) KeySize() uint32 {
+ return m.keySize
+}
+
+// ValueSize returns the size of the map value in bytes.
+func (m *Map) ValueSize() uint32 {
+ return m.valueSize
+}
+
+// MaxEntries returns the maximum number of elements the map can hold.
+func (m *Map) MaxEntries() uint32 {
+ return m.maxEntries
+}
+
+// Flags returns the flags of the map.
+func (m *Map) Flags() uint32 {
+ return m.flags
+}
+
+// Info returns metadata about the map.
+func (m *Map) Info() (*MapInfo, error) {
+ return newMapInfoFromFd(m.fd)
+}
+
+// MapLookupFlags controls the behaviour of the map lookup calls.
+type MapLookupFlags uint64
+
+// LookupLock look up the value of a spin-locked map.
+const LookupLock MapLookupFlags = 4
+
+// Lookup retrieves a value from a Map.
+//
+// Calls Close() on valueOut if it is of type **Map or **Program,
+// and *valueOut is not nil.
+//
+// Returns an error if the key doesn't exist, see ErrKeyNotExist.
+func (m *Map) Lookup(key, valueOut interface{}) error {
+ return m.LookupWithFlags(key, valueOut, 0)
+}
+
+// LookupWithFlags retrieves a value from a Map with flags.
+//
+// Passing LookupLock flag will look up the value of a spin-locked
+// map without returning the lock. This must be specified if the
+// elements contain a spinlock.
+//
+// Calls Close() on valueOut if it is of type **Map or **Program,
+// and *valueOut is not nil.
+//
+// Returns an error if the key doesn't exist, see ErrKeyNotExist.
+func (m *Map) LookupWithFlags(key, valueOut interface{}, flags MapLookupFlags) error {
+ if m.typ.hasPerCPUValue() {
+ return m.lookupPerCPU(key, valueOut, flags)
+ }
+
+ valueBytes := makeMapSyscallOutput(valueOut, m.fullValueSize)
+ if err := m.lookup(key, valueBytes.Pointer(), flags); err != nil {
+ return err
+ }
+
+ return m.unmarshalValue(valueOut, valueBytes)
+}
+
+// LookupAndDelete retrieves and deletes a value from a Map.
+//
+// Returns ErrKeyNotExist if the key doesn't exist.
+func (m *Map) LookupAndDelete(key, valueOut interface{}) error {
+ return m.LookupAndDeleteWithFlags(key, valueOut, 0)
+}
+
+// LookupAndDeleteWithFlags retrieves and deletes a value from a Map.
+//
+// Passing LookupLock flag will look up and delete the value of a spin-locked
+// map without returning the lock. This must be specified if the elements
+// contain a spinlock.
+//
+// Returns ErrKeyNotExist if the key doesn't exist.
+func (m *Map) LookupAndDeleteWithFlags(key, valueOut interface{}, flags MapLookupFlags) error {
+ if m.typ.hasPerCPUValue() {
+ return m.lookupAndDeletePerCPU(key, valueOut, flags)
+ }
+
+ valueBytes := makeMapSyscallOutput(valueOut, m.fullValueSize)
+ if err := m.lookupAndDelete(key, valueBytes.Pointer(), flags); err != nil {
+ return err
+ }
+ return m.unmarshalValue(valueOut, valueBytes)
+}
+
+// LookupBytes gets a value from Map.
+//
+// Returns a nil value if a key doesn't exist.
+func (m *Map) LookupBytes(key interface{}) ([]byte, error) {
+ valueBytes := make([]byte, m.fullValueSize)
+ valuePtr := sys.NewSlicePointer(valueBytes)
+
+ err := m.lookup(key, valuePtr, 0)
+ if errors.Is(err, ErrKeyNotExist) {
+ return nil, nil
+ }
+
+ return valueBytes, err
+}
+
+func (m *Map) lookupPerCPU(key, valueOut any, flags MapLookupFlags) error {
+ valueBytes := make([]byte, m.fullValueSize)
+ if err := m.lookup(key, sys.NewSlicePointer(valueBytes), flags); err != nil {
+ return err
+ }
+ return unmarshalPerCPUValue(valueOut, int(m.valueSize), valueBytes)
+}
+
+func (m *Map) lookup(key interface{}, valueOut sys.Pointer, flags MapLookupFlags) error {
+ keyPtr, err := m.marshalKey(key)
+ if err != nil {
+ return fmt.Errorf("can't marshal key: %w", err)
+ }
+
+ attr := sys.MapLookupElemAttr{
+ MapFd: m.fd.Uint(),
+ Key: keyPtr,
+ Value: valueOut,
+ Flags: uint64(flags),
+ }
+
+ if err = sys.MapLookupElem(&attr); err != nil {
+ return fmt.Errorf("lookup: %w", wrapMapError(err))
+ }
+ return nil
+}
+
+func (m *Map) lookupAndDeletePerCPU(key, valueOut any, flags MapLookupFlags) error {
+ valueBytes := make([]byte, m.fullValueSize)
+ if err := m.lookupAndDelete(key, sys.NewSlicePointer(valueBytes), flags); err != nil {
+ return err
+ }
+ return unmarshalPerCPUValue(valueOut, int(m.valueSize), valueBytes)
+}
+
+func (m *Map) lookupAndDelete(key any, valuePtr sys.Pointer, flags MapLookupFlags) error {
+ keyPtr, err := m.marshalKey(key)
+ if err != nil {
+ return fmt.Errorf("can't marshal key: %w", err)
+ }
+
+ attr := sys.MapLookupAndDeleteElemAttr{
+ MapFd: m.fd.Uint(),
+ Key: keyPtr,
+ Value: valuePtr,
+ Flags: uint64(flags),
+ }
+
+ if err := sys.MapLookupAndDeleteElem(&attr); err != nil {
+ return fmt.Errorf("lookup and delete: %w", wrapMapError(err))
+ }
+
+ return nil
+}
+
+// MapUpdateFlags controls the behaviour of the Map.Update call.
+//
+// The exact semantics depend on the specific MapType.
+type MapUpdateFlags uint64
+
+const (
+ // UpdateAny creates a new element or update an existing one.
+ UpdateAny MapUpdateFlags = iota
+ // UpdateNoExist creates a new element.
+ UpdateNoExist MapUpdateFlags = 1 << (iota - 1)
+ // UpdateExist updates an existing element.
+ UpdateExist
+ // UpdateLock updates elements under bpf_spin_lock.
+ UpdateLock
+)
+
+// Put replaces or creates a value in map.
+//
+// It is equivalent to calling Update with UpdateAny.
+func (m *Map) Put(key, value interface{}) error {
+ return m.Update(key, value, UpdateAny)
+}
+
+// Update changes the value of a key.
+func (m *Map) Update(key, value any, flags MapUpdateFlags) error {
+ if m.typ.hasPerCPUValue() {
+ return m.updatePerCPU(key, value, flags)
+ }
+
+ valuePtr, err := m.marshalValue(value)
+ if err != nil {
+ return fmt.Errorf("marshal value: %w", err)
+ }
+
+ return m.update(key, valuePtr, flags)
+}
+
+func (m *Map) updatePerCPU(key, value any, flags MapUpdateFlags) error {
+ valuePtr, err := marshalPerCPUValue(value, int(m.valueSize))
+ if err != nil {
+ return fmt.Errorf("marshal value: %w", err)
+ }
+
+ return m.update(key, valuePtr, flags)
+}
+
+func (m *Map) update(key any, valuePtr sys.Pointer, flags MapUpdateFlags) error {
+ keyPtr, err := m.marshalKey(key)
+ if err != nil {
+ return fmt.Errorf("marshal key: %w", err)
+ }
+
+ attr := sys.MapUpdateElemAttr{
+ MapFd: m.fd.Uint(),
+ Key: keyPtr,
+ Value: valuePtr,
+ Flags: uint64(flags),
+ }
+
+ if err = sys.MapUpdateElem(&attr); err != nil {
+ return fmt.Errorf("update: %w", wrapMapError(err))
+ }
+
+ return nil
+}
+
+// Delete removes a value.
+//
+// Returns ErrKeyNotExist if the key does not exist.
+func (m *Map) Delete(key interface{}) error {
+ keyPtr, err := m.marshalKey(key)
+ if err != nil {
+ return fmt.Errorf("can't marshal key: %w", err)
+ }
+
+ attr := sys.MapDeleteElemAttr{
+ MapFd: m.fd.Uint(),
+ Key: keyPtr,
+ }
+
+ if err = sys.MapDeleteElem(&attr); err != nil {
+ return fmt.Errorf("delete: %w", wrapMapError(err))
+ }
+ return nil
+}
+
+// NextKey finds the key following an initial key.
+//
+// See NextKeyBytes for details.
+//
+// Returns ErrKeyNotExist if there is no next key.
+func (m *Map) NextKey(key, nextKeyOut interface{}) error {
+ nextKeyBytes := makeMapSyscallOutput(nextKeyOut, int(m.keySize))
+
+ if err := m.nextKey(key, nextKeyBytes.Pointer()); err != nil {
+ return err
+ }
+
+ if err := nextKeyBytes.Unmarshal(nextKeyOut); err != nil {
+ return fmt.Errorf("can't unmarshal next key: %w", err)
+ }
+ return nil
+}
+
+// NextKeyBytes returns the key following an initial key as a byte slice.
+//
+// Passing nil will return the first key.
+//
+// Use Iterate if you want to traverse all entries in the map.
+//
+// Returns nil if there are no more keys.
+func (m *Map) NextKeyBytes(key interface{}) ([]byte, error) {
+ nextKey := make([]byte, m.keySize)
+ nextKeyPtr := sys.NewSlicePointer(nextKey)
+
+ err := m.nextKey(key, nextKeyPtr)
+ if errors.Is(err, ErrKeyNotExist) {
+ return nil, nil
+ }
+
+ return nextKey, err
+}
+
+func (m *Map) nextKey(key interface{}, nextKeyOut sys.Pointer) error {
+ var (
+ keyPtr sys.Pointer
+ err error
+ )
+
+ if key != nil {
+ keyPtr, err = m.marshalKey(key)
+ if err != nil {
+ return fmt.Errorf("can't marshal key: %w", err)
+ }
+ }
+
+ attr := sys.MapGetNextKeyAttr{
+ MapFd: m.fd.Uint(),
+ Key: keyPtr,
+ NextKey: nextKeyOut,
+ }
+
+ if err = sys.MapGetNextKey(&attr); err != nil {
+ // Kernels 4.4.131 and earlier return EFAULT instead of a pointer to the
+ // first map element when a nil key pointer is specified.
+ if key == nil && errors.Is(err, unix.EFAULT) {
+ var guessKey []byte
+ guessKey, err = m.guessNonExistentKey()
+ if err != nil {
+ return err
+ }
+
+ // Retry the syscall with a valid non-existing key.
+ attr.Key = sys.NewSlicePointer(guessKey)
+ if err = sys.MapGetNextKey(&attr); err == nil {
+ return nil
+ }
+ }
+
+ return fmt.Errorf("next key: %w", wrapMapError(err))
+ }
+
+ return nil
+}
+
+var mmapProtectedPage = internal.Memoize(func() ([]byte, error) {
+ return unix.Mmap(-1, 0, os.Getpagesize(), unix.PROT_NONE, unix.MAP_ANON|unix.MAP_SHARED)
+})
+
+// guessNonExistentKey attempts to perform a map lookup that returns ENOENT.
+// This is necessary on kernels before 4.4.132, since those don't support
+// iterating maps from the start by providing an invalid key pointer.
+func (m *Map) guessNonExistentKey() ([]byte, error) {
+ // Map a protected page and use that as the value pointer. This saves some
+ // work copying out the value, which we're not interested in.
+ page, err := mmapProtectedPage()
+ if err != nil {
+ return nil, err
+ }
+ valuePtr := sys.NewSlicePointer(page)
+
+ randKey := make([]byte, int(m.keySize))
+
+ for i := 0; i < 4; i++ {
+ switch i {
+ // For hash maps, the 0 key is less likely to be occupied. They're often
+ // used for storing data related to pointers, and their access pattern is
+ // generally scattered across the keyspace.
+ case 0:
+ // An all-0xff key is guaranteed to be out of bounds of any array, since
+ // those have a fixed key size of 4 bytes. The only corner case being
+ // arrays with 2^32 max entries, but those are prohibitively expensive
+ // in many environments.
+ case 1:
+ for r := range randKey {
+ randKey[r] = 0xff
+ }
+ // Inspired by BCC, 0x55 is an alternating binary pattern (0101), so
+ // is unlikely to be taken.
+ case 2:
+ for r := range randKey {
+ randKey[r] = 0x55
+ }
+ // Last ditch effort, generate a random key.
+ case 3:
+ rand.New(rand.NewSource(time.Now().UnixNano())).Read(randKey)
+ }
+
+ err := m.lookup(randKey, valuePtr, 0)
+ if errors.Is(err, ErrKeyNotExist) {
+ return randKey, nil
+ }
+ }
+
+ return nil, errors.New("couldn't find non-existing key")
+}
+
+// BatchLookup looks up many elements in a map at once.
+//
+// "keysOut" and "valuesOut" must be of type slice, a pointer
+// to a slice or buffer will not work.
+// "prevKey" is the key to start the batch lookup from, it will
+// *not* be included in the results. Use nil to start at the first key.
+//
+// ErrKeyNotExist is returned when the batch lookup has reached
+// the end of all possible results, even when partial results
+// are returned. It should be used to evaluate when lookup is "done".
+func (m *Map) BatchLookup(prevKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
+ return m.batchLookup(sys.BPF_MAP_LOOKUP_BATCH, prevKey, nextKeyOut, keysOut, valuesOut, opts)
+}
+
+// BatchLookupAndDelete looks up many elements in a map at once.
+//
+// It then deletes all those elements.
+// "keysOut" and "valuesOut" must be of type slice, a pointer
+// to a slice or buffer will not work.
+// "prevKey" is the key to start the batch lookup from, it will
+// *not* be included in the results. Use nil to start at the first key.
+//
+// ErrKeyNotExist is returned when the batch lookup has reached
+// the end of all possible results, even when partial results
+// are returned. It should be used to evaluate when lookup is "done".
+func (m *Map) BatchLookupAndDelete(prevKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
+ return m.batchLookup(sys.BPF_MAP_LOOKUP_AND_DELETE_BATCH, prevKey, nextKeyOut, keysOut, valuesOut, opts)
+}
+
+func (m *Map) batchLookup(cmd sys.Cmd, startKey, nextKeyOut, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) {
+ if err := haveBatchAPI(); err != nil {
+ return 0, err
+ }
+ if m.typ.hasPerCPUValue() {
+ return 0, ErrNotSupported
+ }
+ keysValue := reflect.ValueOf(keysOut)
+ if keysValue.Kind() != reflect.Slice {
+ return 0, fmt.Errorf("keys must be a slice")
+ }
+ valuesValue := reflect.ValueOf(valuesOut)
+ if valuesValue.Kind() != reflect.Slice {
+ return 0, fmt.Errorf("valuesOut must be a slice")
+ }
+ count := keysValue.Len()
+ if count != valuesValue.Len() {
+ return 0, fmt.Errorf("keysOut and valuesOut must be the same length")
+ }
+ keyBuf := make([]byte, count*int(m.keySize))
+ keyPtr := sys.NewSlicePointer(keyBuf)
+ valueBuf := make([]byte, count*int(m.fullValueSize))
+ valuePtr := sys.NewSlicePointer(valueBuf)
+ nextBuf := makeMapSyscallOutput(nextKeyOut, int(m.keySize))
+
+ attr := sys.MapLookupBatchAttr{
+ MapFd: m.fd.Uint(),
+ Keys: keyPtr,
+ Values: valuePtr,
+ Count: uint32(count),
+ OutBatch: nextBuf.Pointer(),
+ }
+
+ if opts != nil {
+ attr.ElemFlags = opts.ElemFlags
+ attr.Flags = opts.Flags
+ }
+
+ var err error
+ if startKey != nil {
+ attr.InBatch, err = marshalMapSyscallInput(startKey, int(m.keySize))
+ if err != nil {
+ return 0, err
+ }
+ }
+
+ _, sysErr := sys.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr))
+ sysErr = wrapMapError(sysErr)
+ if sysErr != nil && !errors.Is(sysErr, unix.ENOENT) {
+ return 0, sysErr
+ }
+
+ err = nextBuf.Unmarshal(nextKeyOut)
+ if err != nil {
+ return 0, err
+ }
+ err = sysenc.Unmarshal(keysOut, keyBuf)
+ if err != nil {
+ return 0, err
+ }
+ err = sysenc.Unmarshal(valuesOut, valueBuf)
+ if err != nil {
+ return 0, err
+ }
+
+ return int(attr.Count), sysErr
+}
+
+// BatchUpdate updates the map with multiple keys and values
+// simultaneously.
+// "keys" and "values" must be of type slice, a pointer
+// to a slice or buffer will not work.
+func (m *Map) BatchUpdate(keys, values interface{}, opts *BatchOptions) (int, error) {
+ if m.typ.hasPerCPUValue() {
+ return 0, ErrNotSupported
+ }
+ keysValue := reflect.ValueOf(keys)
+ if keysValue.Kind() != reflect.Slice {
+ return 0, fmt.Errorf("keys must be a slice")
+ }
+ valuesValue := reflect.ValueOf(values)
+ if valuesValue.Kind() != reflect.Slice {
+ return 0, fmt.Errorf("values must be a slice")
+ }
+ var (
+ count = keysValue.Len()
+ valuePtr sys.Pointer
+ err error
+ )
+ if count != valuesValue.Len() {
+ return 0, fmt.Errorf("keys and values must be the same length")
+ }
+ keyPtr, err := marshalMapSyscallInput(keys, count*int(m.keySize))
+ if err != nil {
+ return 0, err
+ }
+ valuePtr, err = marshalMapSyscallInput(values, count*int(m.valueSize))
+ if err != nil {
+ return 0, err
+ }
+
+ attr := sys.MapUpdateBatchAttr{
+ MapFd: m.fd.Uint(),
+ Keys: keyPtr,
+ Values: valuePtr,
+ Count: uint32(count),
+ }
+ if opts != nil {
+ attr.ElemFlags = opts.ElemFlags
+ attr.Flags = opts.Flags
+ }
+
+ err = sys.MapUpdateBatch(&attr)
+ if err != nil {
+ if haveFeatErr := haveBatchAPI(); haveFeatErr != nil {
+ return 0, haveFeatErr
+ }
+ return int(attr.Count), fmt.Errorf("batch update: %w", wrapMapError(err))
+ }
+
+ return int(attr.Count), nil
+}
+
+// BatchDelete batch deletes entries in the map by keys.
+// "keys" must be of type slice, a pointer to a slice or buffer will not work.
+func (m *Map) BatchDelete(keys interface{}, opts *BatchOptions) (int, error) {
+ if m.typ.hasPerCPUValue() {
+ return 0, ErrNotSupported
+ }
+ keysValue := reflect.ValueOf(keys)
+ if keysValue.Kind() != reflect.Slice {
+ return 0, fmt.Errorf("keys must be a slice")
+ }
+ count := keysValue.Len()
+ keyPtr, err := marshalMapSyscallInput(keys, count*int(m.keySize))
+ if err != nil {
+ return 0, fmt.Errorf("cannot marshal keys: %v", err)
+ }
+
+ attr := sys.MapDeleteBatchAttr{
+ MapFd: m.fd.Uint(),
+ Keys: keyPtr,
+ Count: uint32(count),
+ }
+
+ if opts != nil {
+ attr.ElemFlags = opts.ElemFlags
+ attr.Flags = opts.Flags
+ }
+
+ if err = sys.MapDeleteBatch(&attr); err != nil {
+ if haveFeatErr := haveBatchAPI(); haveFeatErr != nil {
+ return 0, haveFeatErr
+ }
+ return int(attr.Count), fmt.Errorf("batch delete: %w", wrapMapError(err))
+ }
+
+ return int(attr.Count), nil
+}
+
+// Iterate traverses a map.
+//
+// It's safe to create multiple iterators at the same time.
+//
+// It's not possible to guarantee that all keys in a map will be
+// returned if there are concurrent modifications to the map.
+func (m *Map) Iterate() *MapIterator {
+ return newMapIterator(m)
+}
+
+// Close the Map's underlying file descriptor, which could unload the
+// Map from the kernel if it is not pinned or in use by a loaded Program.
+func (m *Map) Close() error {
+ if m == nil {
+ // This makes it easier to clean up when iterating maps
+ // of maps / programs.
+ return nil
+ }
+
+ return m.fd.Close()
+}
+
+// FD gets the file descriptor of the Map.
+//
+// Calling this function is invalid after Close has been called.
+func (m *Map) FD() int {
+ return m.fd.Int()
+}
+
+// Clone creates a duplicate of the Map.
+//
+// Closing the duplicate does not affect the original, and vice versa.
+// Changes made to the map are reflected by both instances however.
+// If the original map was pinned, the cloned map will not be pinned by default.
+//
+// Cloning a nil Map returns nil.
+func (m *Map) Clone() (*Map, error) {
+ if m == nil {
+ return nil, nil
+ }
+
+ dup, err := m.fd.Dup()
+ if err != nil {
+ return nil, fmt.Errorf("can't clone map: %w", err)
+ }
+
+ return &Map{
+ m.name,
+ dup,
+ m.typ,
+ m.keySize,
+ m.valueSize,
+ m.maxEntries,
+ m.flags,
+ "",
+ m.fullValueSize,
+ }, nil
+}
+
+// Pin persists the map on the BPF virtual file system past the lifetime of
+// the process that created it.
+//
+// Calling Pin on a previously pinned map will overwrite the path, except when
+// the new path already exists. Re-pinning across filesystems is not supported.
+// You can Clone a map to pin it to a different path.
+//
+// This requires bpffs to be mounted above fileName.
+// See https://docs.cilium.io/en/stable/network/kubernetes/configuration/#mounting-bpffs-with-systemd
+func (m *Map) Pin(fileName string) error {
+ if err := internal.Pin(m.pinnedPath, fileName, m.fd); err != nil {
+ return err
+ }
+ m.pinnedPath = fileName
+ return nil
+}
+
+// Unpin removes the persisted state for the map from the BPF virtual filesystem.
+//
+// Failed calls to Unpin will not alter the state returned by IsPinned.
+//
+// Unpinning an unpinned Map returns nil.
+func (m *Map) Unpin() error {
+ if err := internal.Unpin(m.pinnedPath); err != nil {
+ return err
+ }
+ m.pinnedPath = ""
+ return nil
+}
+
+// IsPinned returns true if the map has a non-empty pinned path.
+func (m *Map) IsPinned() bool {
+ return m.pinnedPath != ""
+}
+
+// Freeze prevents the map from being modified from user space.
+//
+// It makes no changes to kernel-side restrictions.
+func (m *Map) Freeze() error {
+ attr := sys.MapFreezeAttr{
+ MapFd: m.fd.Uint(),
+ }
+
+ if err := sys.MapFreeze(&attr); err != nil {
+ if haveFeatErr := haveMapMutabilityModifiers(); haveFeatErr != nil {
+ return fmt.Errorf("can't freeze map: %w", haveFeatErr)
+ }
+ return fmt.Errorf("can't freeze map: %w", err)
+ }
+ return nil
+}
+
+// finalize populates the Map according to the Contents specified
+// in spec and freezes the Map if requested by spec.
+func (m *Map) finalize(spec *MapSpec) error {
+ for _, kv := range spec.Contents {
+ if err := m.Put(kv.Key, kv.Value); err != nil {
+ return fmt.Errorf("putting value: key %v: %w", kv.Key, err)
+ }
+ }
+
+ if spec.Freeze {
+ if err := m.Freeze(); err != nil {
+ return fmt.Errorf("freezing map: %w", err)
+ }
+ }
+
+ return nil
+}
+
+func (m *Map) marshalKey(data interface{}) (sys.Pointer, error) {
+ if data == nil {
+ if m.keySize == 0 {
+ // Queues have a key length of zero, so passing nil here is valid.
+ return sys.NewPointer(nil), nil
+ }
+ return sys.Pointer{}, errors.New("can't use nil as key of map")
+ }
+
+ return marshalMapSyscallInput(data, int(m.keySize))
+}
+
+func (m *Map) marshalValue(data interface{}) (sys.Pointer, error) {
+ var (
+ buf []byte
+ err error
+ )
+
+ switch value := data.(type) {
+ case *Map:
+ if !m.typ.canStoreMap() {
+ return sys.Pointer{}, fmt.Errorf("can't store map in %s", m.typ)
+ }
+ buf, err = marshalMap(value, int(m.valueSize))
+
+ case *Program:
+ if !m.typ.canStoreProgram() {
+ return sys.Pointer{}, fmt.Errorf("can't store program in %s", m.typ)
+ }
+ buf, err = marshalProgram(value, int(m.valueSize))
+
+ default:
+ return marshalMapSyscallInput(data, int(m.valueSize))
+ }
+
+ if err != nil {
+ return sys.Pointer{}, err
+ }
+
+ return sys.NewSlicePointer(buf), nil
+}
+
+func (m *Map) unmarshalValue(value any, buf sysenc.Buffer) error {
+ switch value := value.(type) {
+ case **Map:
+ if !m.typ.canStoreMap() {
+ return fmt.Errorf("can't read a map from %s", m.typ)
+ }
+
+ other, err := unmarshalMap(buf)
+ if err != nil {
+ return err
+ }
+
+ // The caller might close the map externally, so ignore errors.
+ _ = (*value).Close()
+
+ *value = other
+ return nil
+
+ case *Map:
+ if !m.typ.canStoreMap() {
+ return fmt.Errorf("can't read a map from %s", m.typ)
+ }
+ return errors.New("require pointer to *Map")
+
+ case **Program:
+ if !m.typ.canStoreProgram() {
+ return fmt.Errorf("can't read a program from %s", m.typ)
+ }
+
+ other, err := unmarshalProgram(buf)
+ if err != nil {
+ return err
+ }
+
+ // The caller might close the program externally, so ignore errors.
+ _ = (*value).Close()
+
+ *value = other
+ return nil
+
+ case *Program:
+ if !m.typ.canStoreProgram() {
+ return fmt.Errorf("can't read a program from %s", m.typ)
+ }
+ return errors.New("require pointer to *Program")
+ }
+
+ return buf.Unmarshal(value)
+}
+
+// LoadPinnedMap loads a Map from a BPF file.
+func LoadPinnedMap(fileName string, opts *LoadPinOptions) (*Map, error) {
+ fd, err := sys.ObjGet(&sys.ObjGetAttr{
+ Pathname: sys.NewStringPointer(fileName),
+ FileFlags: opts.Marshal(),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ m, err := newMapFromFD(fd)
+ if err == nil {
+ m.pinnedPath = fileName
+ }
+
+ return m, err
+}
+
+// unmarshalMap creates a map from a map ID encoded in host endianness.
+func unmarshalMap(buf sysenc.Buffer) (*Map, error) {
+ var id uint32
+ if err := buf.Unmarshal(&id); err != nil {
+ return nil, err
+ }
+ return NewMapFromID(MapID(id))
+}
+
+// marshalMap marshals the fd of a map into a buffer in host endianness.
+func marshalMap(m *Map, length int) ([]byte, error) {
+ if length != 4 {
+ return nil, fmt.Errorf("can't marshal map to %d bytes", length)
+ }
+
+ buf := make([]byte, 4)
+ internal.NativeEndian.PutUint32(buf, m.fd.Uint())
+ return buf, nil
+}
+
+// MapIterator iterates a Map.
+//
+// See Map.Iterate.
+type MapIterator struct {
+ target *Map
+ curKey []byte
+ count, maxEntries uint32
+ done bool
+ err error
+}
+
+func newMapIterator(target *Map) *MapIterator {
+ return &MapIterator{
+ target: target,
+ maxEntries: target.maxEntries,
+ }
+}
+
+// Next decodes the next key and value.
+//
+// Iterating a hash map from which keys are being deleted is not
+// safe. You may see the same key multiple times. Iteration may
+// also abort with an error, see IsIterationAborted.
+//
+// Returns false if there are no more entries. You must check
+// the result of Err afterwards.
+//
+// See Map.Get for further caveats around valueOut.
+func (mi *MapIterator) Next(keyOut, valueOut interface{}) bool {
+ if mi.err != nil || mi.done {
+ return false
+ }
+
+	// For array-like maps NextKeyBytes returns nil only after maxEntries
+ // iterations.
+ for mi.count <= mi.maxEntries {
+ var nextKey []byte
+ if mi.curKey == nil {
+ // Pass nil interface to NextKeyBytes to make sure the Map's first key
+ // is returned. If we pass an uninitialized []byte instead, it'll see a
+ // non-nil interface and try to marshal it.
+ nextKey, mi.err = mi.target.NextKeyBytes(nil)
+
+ mi.curKey = make([]byte, mi.target.keySize)
+ } else {
+ nextKey, mi.err = mi.target.NextKeyBytes(mi.curKey)
+ }
+ if mi.err != nil {
+ mi.err = fmt.Errorf("get next key: %w", mi.err)
+ return false
+ }
+
+ if nextKey == nil {
+ mi.done = true
+ return false
+ }
+
+ // The user can get access to nextKey since unmarshalBytes
+ // does not copy when unmarshaling into a []byte.
+ // Make a copy to prevent accidental corruption of
+ // iterator state.
+ copy(mi.curKey, nextKey)
+
+ mi.count++
+ mi.err = mi.target.Lookup(nextKey, valueOut)
+ if errors.Is(mi.err, ErrKeyNotExist) {
+ // Even though the key should be valid, we couldn't look up
+ // its value. If we're iterating a hash map this is probably
+ // because a concurrent delete removed the value before we
+ // could get it. This means that the next call to NextKeyBytes
+ // is very likely to restart iteration.
+ // If we're iterating one of the fd maps like
+ // ProgramArray it means that a given slot doesn't have
+ // a valid fd associated. It's OK to continue to the next slot.
+ continue
+ }
+ if mi.err != nil {
+ mi.err = fmt.Errorf("look up next key: %w", mi.err)
+ return false
+ }
+
+ if ptr, ok := keyOut.(unsafe.Pointer); ok {
+ copy(unsafe.Slice((*byte)(ptr), len(nextKey)), nextKey)
+ } else {
+ mi.err = sysenc.Unmarshal(keyOut, nextKey)
+ }
+
+ return mi.err == nil
+ }
+
+ mi.err = fmt.Errorf("%w", ErrIterationAborted)
+ return false
+}
+
+// Err returns any encountered error.
+//
+// The method must be called after Next returns false.
+//
+// Returns ErrIterationAborted if it wasn't possible to do a full iteration.
+func (mi *MapIterator) Err() error {
+ return mi.err
+}
+
+// MapGetNextID returns the ID of the next eBPF map.
+//
+// Returns ErrNotExist, if there is no next eBPF map.
+func MapGetNextID(startID MapID) (MapID, error) {
+ attr := &sys.MapGetNextIdAttr{Id: uint32(startID)}
+ return MapID(attr.NextId), sys.MapGetNextId(attr)
+}
+
+// NewMapFromID returns the map for a given id.
+//
+// Returns ErrNotExist, if there is no eBPF map with the given id.
+func NewMapFromID(id MapID) (*Map, error) {
+ fd, err := sys.MapGetFdById(&sys.MapGetFdByIdAttr{
+ Id: uint32(id),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return newMapFromFD(fd)
+}
diff --git a/vendor/github.com/cilium/ebpf/marshalers.go b/vendor/github.com/cilium/ebpf/marshalers.go
new file mode 100644
index 000000000..e89a12f0f
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/marshalers.go
@@ -0,0 +1,129 @@
+package ebpf
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "unsafe"
+
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/sysenc"
+)
+
+// marshalMapSyscallInput converts an arbitrary value into a pointer suitable
+// to be passed to the kernel.
+//
+// As an optimization, it returns the original value if it is an
+// unsafe.Pointer.
+func marshalMapSyscallInput(data any, length int) (sys.Pointer, error) {
+ if ptr, ok := data.(unsafe.Pointer); ok {
+ return sys.NewPointer(ptr), nil
+ }
+
+ buf, err := sysenc.Marshal(data, length)
+ if err != nil {
+ return sys.Pointer{}, err
+ }
+
+ return buf.Pointer(), nil
+}
+
+func makeMapSyscallOutput(dst any, length int) sysenc.Buffer {
+ if ptr, ok := dst.(unsafe.Pointer); ok {
+ return sysenc.UnsafeBuffer(ptr)
+ }
+
+ _, ok := dst.(encoding.BinaryUnmarshaler)
+ if ok {
+ return sysenc.SyscallOutput(nil, length)
+ }
+
+ return sysenc.SyscallOutput(dst, length)
+}
+
+// marshalPerCPUValue encodes a slice containing one value per
+// possible CPU into a buffer of bytes.
+//
+// Values are initialized to zero if the slice has less elements than CPUs.
+func marshalPerCPUValue(slice any, elemLength int) (sys.Pointer, error) {
+ sliceType := reflect.TypeOf(slice)
+ if sliceType.Kind() != reflect.Slice {
+ return sys.Pointer{}, errors.New("per-CPU value requires slice")
+ }
+
+ possibleCPUs, err := internal.PossibleCPUs()
+ if err != nil {
+ return sys.Pointer{}, err
+ }
+
+ sliceValue := reflect.ValueOf(slice)
+ sliceLen := sliceValue.Len()
+ if sliceLen > possibleCPUs {
+ return sys.Pointer{}, fmt.Errorf("per-CPU value exceeds number of CPUs")
+ }
+
+ alignedElemLength := internal.Align(elemLength, 8)
+ buf := make([]byte, alignedElemLength*possibleCPUs)
+
+ for i := 0; i < sliceLen; i++ {
+ elem := sliceValue.Index(i).Interface()
+ elemBytes, err := sysenc.Marshal(elem, elemLength)
+ if err != nil {
+ return sys.Pointer{}, err
+ }
+
+ offset := i * alignedElemLength
+ elemBytes.CopyTo(buf[offset : offset+elemLength])
+ }
+
+ return sys.NewSlicePointer(buf), nil
+}
+
+// unmarshalPerCPUValue decodes a buffer into a slice containing one value per
+// possible CPU.
+//
+// slicePtr must be a pointer to a slice.
+func unmarshalPerCPUValue(slicePtr any, elemLength int, buf []byte) error {
+ slicePtrType := reflect.TypeOf(slicePtr)
+ if slicePtrType.Kind() != reflect.Ptr || slicePtrType.Elem().Kind() != reflect.Slice {
+ return fmt.Errorf("per-cpu value requires pointer to slice")
+ }
+
+ possibleCPUs, err := internal.PossibleCPUs()
+ if err != nil {
+ return err
+ }
+
+ sliceType := slicePtrType.Elem()
+ slice := reflect.MakeSlice(sliceType, possibleCPUs, possibleCPUs)
+
+ sliceElemType := sliceType.Elem()
+ sliceElemIsPointer := sliceElemType.Kind() == reflect.Ptr
+ if sliceElemIsPointer {
+ sliceElemType = sliceElemType.Elem()
+ }
+
+ stride := internal.Align(elemLength, 8)
+ for i := 0; i < possibleCPUs; i++ {
+ var elem any
+ if sliceElemIsPointer {
+ newElem := reflect.New(sliceElemType)
+ slice.Index(i).Set(newElem)
+ elem = newElem.Interface()
+ } else {
+ elem = slice.Index(i).Addr().Interface()
+ }
+
+ err := sysenc.Unmarshal(elem, buf[:elemLength])
+ if err != nil {
+ return fmt.Errorf("cpu %d: %w", i, err)
+ }
+
+ buf = buf[stride:]
+ }
+
+ reflect.ValueOf(slicePtr).Elem().Set(slice)
+ return nil
+}
diff --git a/vendor/github.com/cilium/ebpf/netlify.toml b/vendor/github.com/cilium/ebpf/netlify.toml
new file mode 100644
index 000000000..67c83f3b3
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/netlify.toml
@@ -0,0 +1,4 @@
+[build]
+ base = "docs/"
+ publish = "site/"
+ command = "mkdocs build"
diff --git a/vendor/github.com/cilium/ebpf/prog.go b/vendor/github.com/cilium/ebpf/prog.go
new file mode 100644
index 000000000..6d46a0422
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/prog.go
@@ -0,0 +1,1034 @@
+package ebpf
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "math"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "time"
+ "unsafe"
+
+ "github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/btf"
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/sysenc"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+// ErrNotSupported is returned whenever the kernel doesn't support a feature.
+var ErrNotSupported = internal.ErrNotSupported
+
+// ProgramID represents the unique ID of an eBPF program.
+type ProgramID uint32
+
+const (
+ // Number of bytes to pad the output buffer for BPF_PROG_TEST_RUN.
+ // This is currently the maximum of spare space allocated for SKB
+ // and XDP programs, and equal to XDP_PACKET_HEADROOM + NET_IP_ALIGN.
+ outputPad = 256 + 2
+)
+
+// DefaultVerifierLogSize is the default number of bytes allocated for the
+// verifier log.
+const DefaultVerifierLogSize = 64 * 1024
+
+// maxVerifierLogSize is the maximum size of verifier log buffer the kernel
+// will accept before returning EINVAL.
+const maxVerifierLogSize = math.MaxUint32 >> 2
+
+// ProgramOptions control loading a program into the kernel.
+type ProgramOptions struct {
+ // Bitmap controlling the detail emitted by the kernel's eBPF verifier log.
+ // LogLevel-type values can be ORed together to request specific kinds of
+ // verifier output. See the documentation on [ebpf.LogLevel] for details.
+ //
+ // opts.LogLevel = (ebpf.LogLevelBranch | ebpf.LogLevelStats)
+ //
+ // If left to its default value, the program will first be loaded without
+ // verifier output enabled. Upon error, the program load will be repeated
+ // with LogLevelBranch and the given (or default) LogSize value.
+ //
+ // Setting this to a non-zero value will unconditionally enable the verifier
+ // log, populating the [ebpf.Program.VerifierLog] field on successful loads
+ // and including detailed verifier errors if the program is rejected. This
+ // will always allocate an output buffer, but will result in only a single
+ // attempt at loading the program.
+ LogLevel LogLevel
+
+ // Controls the output buffer size for the verifier log, in bytes. See the
+ // documentation on ProgramOptions.LogLevel for details about how this value
+ // is used.
+ //
+ // If this value is set too low to fit the verifier log, the resulting
+ // [ebpf.VerifierError]'s Truncated flag will be true, and the error string
+ // will also contain a hint to that effect.
+ //
+ // Defaults to DefaultVerifierLogSize.
+ LogSize int
+
+ // Disables the verifier log completely, regardless of other options.
+ LogDisabled bool
+
+ // Type information used for CO-RE relocations.
+ //
+ // This is useful in environments where the kernel BTF is not available
+ // (containers) or where it is in a non-standard location. Defaults to
+ // use the kernel BTF from a well-known location if nil.
+ KernelTypes *btf.Spec
+}
+
+// ProgramSpec defines a Program.
+type ProgramSpec struct {
+ // Name is passed to the kernel as a debug aid. Must only contain
+ // alpha numeric and '_' characters.
+ Name string
+
+ // Type determines at which hook in the kernel a program will run.
+ Type ProgramType
+
+ // AttachType of the program, needed to differentiate allowed context
+ // accesses in some newer program types like CGroupSockAddr.
+ //
+ // Available on kernels 4.17 and later.
+ AttachType AttachType
+
+ // Name of a kernel data structure or function to attach to. Its
+ // interpretation depends on Type and AttachType.
+ AttachTo string
+
+ // The program to attach to. Must be provided manually.
+ AttachTarget *Program
+
+ // The name of the ELF section this program originated from.
+ SectionName string
+
+ Instructions asm.Instructions
+
+ // Flags is passed to the kernel and specifies additional program
+ // load attributes.
+ Flags uint32
+
+ // License of the program. Some helpers are only available if
+ // the license is deemed compatible with the GPL.
+ //
+ // See https://www.kernel.org/doc/html/latest/process/license-rules.html#id1
+ License string
+
+ // Version used by Kprobe programs.
+ //
+ // Deprecated on kernels 5.0 and later. Leave empty to let the library
+ // detect this value automatically.
+ KernelVersion uint32
+
+ // The byte order this program was compiled for, may be nil.
+ ByteOrder binary.ByteOrder
+}
+
+// Copy returns a copy of the spec.
+func (ps *ProgramSpec) Copy() *ProgramSpec {
+ if ps == nil {
+ return nil
+ }
+
+ cpy := *ps
+ cpy.Instructions = make(asm.Instructions, len(ps.Instructions))
+ copy(cpy.Instructions, ps.Instructions)
+ return &cpy
+}
+
+// Tag calculates the kernel tag for a series of instructions.
+//
+// Use asm.Instructions.Tag if you need to calculate for non-native endianness.
+func (ps *ProgramSpec) Tag() (string, error) {
+ return ps.Instructions.Tag(internal.NativeEndian)
+}
+
+// VerifierError is returned by [NewProgram] and [NewProgramWithOptions] if a
+// program is rejected by the verifier.
+//
+// Use [errors.As] to access the error.
+type VerifierError = internal.VerifierError
+
+// Program represents BPF program loaded into the kernel.
+//
+// It is not safe to close a Program which is used by other goroutines.
+type Program struct {
+ // Contains the output of the kernel verifier if enabled,
+ // otherwise it is empty.
+ VerifierLog string
+
+ fd *sys.FD
+ name string
+ pinnedPath string
+ typ ProgramType
+}
+
+// NewProgram creates a new Program.
+//
+// See [NewProgramWithOptions] for details.
+//
+// Returns a [VerifierError] containing the full verifier log if the program is
+// rejected by the kernel.
+func NewProgram(spec *ProgramSpec) (*Program, error) {
+ return NewProgramWithOptions(spec, ProgramOptions{})
+}
+
+// NewProgramWithOptions creates a new Program.
+//
+// Loading a program for the first time will perform
+// feature detection by loading small, temporary programs.
+//
+// Returns a [VerifierError] containing the full verifier log if the program is
+// rejected by the kernel.
+func NewProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, error) {
+ if spec == nil {
+ return nil, errors.New("can't load a program from a nil spec")
+ }
+
+ prog, err := newProgramWithOptions(spec, opts)
+ if errors.Is(err, asm.ErrUnsatisfiedMapReference) {
+ return nil, fmt.Errorf("cannot load program without loading its whole collection: %w", err)
+ }
+ return prog, err
+}
+
+func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, error) {
+ if len(spec.Instructions) == 0 {
+ return nil, errors.New("instructions cannot be empty")
+ }
+
+ if spec.Type == UnspecifiedProgram {
+ return nil, errors.New("can't load program of unspecified type")
+ }
+
+ if spec.ByteOrder != nil && spec.ByteOrder != internal.NativeEndian {
+ return nil, fmt.Errorf("can't load %s program on %s", spec.ByteOrder, internal.NativeEndian)
+ }
+
+ if opts.LogSize < 0 {
+ return nil, errors.New("ProgramOptions.LogSize must be a positive value; disable verifier logs using ProgramOptions.LogDisabled")
+ }
+
+ // Kernels before 5.0 (6c4fc209fcf9 "bpf: remove useless version check for prog load")
+ // require the version field to be set to the value of the KERNEL_VERSION
+ // macro for kprobe-type programs.
+ // Overwrite Kprobe program version if set to zero or the magic version constant.
+ kv := spec.KernelVersion
+ if spec.Type == Kprobe && (kv == 0 || kv == internal.MagicKernelVersion) {
+ v, err := internal.KernelVersion()
+ if err != nil {
+ return nil, fmt.Errorf("detecting kernel version: %w", err)
+ }
+ kv = v.Kernel()
+ }
+
+ attr := &sys.ProgLoadAttr{
+ ProgType: sys.ProgType(spec.Type),
+ ProgFlags: spec.Flags,
+ ExpectedAttachType: sys.AttachType(spec.AttachType),
+ License: sys.NewStringPointer(spec.License),
+ KernVersion: kv,
+ }
+
+ if haveObjName() == nil {
+ attr.ProgName = sys.NewObjName(spec.Name)
+ }
+
+ insns := make(asm.Instructions, len(spec.Instructions))
+ copy(insns, spec.Instructions)
+
+ handle, fib, lib, err := btf.MarshalExtInfos(insns)
+ if err != nil && !errors.Is(err, btf.ErrNotSupported) {
+ return nil, fmt.Errorf("load ext_infos: %w", err)
+ }
+ if handle != nil {
+ defer handle.Close()
+
+ attr.ProgBtfFd = uint32(handle.FD())
+
+ attr.FuncInfoRecSize = btf.FuncInfoSize
+ attr.FuncInfoCnt = uint32(len(fib)) / btf.FuncInfoSize
+ attr.FuncInfo = sys.NewSlicePointer(fib)
+
+ attr.LineInfoRecSize = btf.LineInfoSize
+ attr.LineInfoCnt = uint32(len(lib)) / btf.LineInfoSize
+ attr.LineInfo = sys.NewSlicePointer(lib)
+ }
+
+ if err := applyRelocations(insns, opts.KernelTypes, spec.ByteOrder); err != nil {
+ return nil, fmt.Errorf("apply CO-RE relocations: %w", err)
+ }
+
+ kconfig, err := resolveKconfigReferences(insns)
+ if err != nil {
+ return nil, fmt.Errorf("resolve .kconfig: %w", err)
+ }
+ defer kconfig.Close()
+
+ if err := fixupAndValidate(insns); err != nil {
+ return nil, err
+ }
+
+ handles, err := fixupKfuncs(insns)
+ if err != nil {
+ return nil, fmt.Errorf("fixing up kfuncs: %w", err)
+ }
+ defer handles.Close()
+
+ if len(handles) > 0 {
+ fdArray := handles.fdArray()
+ attr.FdArray = sys.NewPointer(unsafe.Pointer(&fdArray[0]))
+ }
+
+ buf := bytes.NewBuffer(make([]byte, 0, insns.Size()))
+ err = insns.Marshal(buf, internal.NativeEndian)
+ if err != nil {
+ return nil, err
+ }
+
+ bytecode := buf.Bytes()
+ attr.Insns = sys.NewSlicePointer(bytecode)
+ attr.InsnCnt = uint32(len(bytecode) / asm.InstructionSize)
+
+ if spec.AttachTarget != nil {
+ targetID, err := findTargetInProgram(spec.AttachTarget, spec.AttachTo, spec.Type, spec.AttachType)
+ if err != nil {
+ return nil, fmt.Errorf("attach %s/%s: %w", spec.Type, spec.AttachType, err)
+ }
+
+ attr.AttachBtfId = targetID
+ attr.AttachBtfObjFd = uint32(spec.AttachTarget.FD())
+ defer runtime.KeepAlive(spec.AttachTarget)
+ } else if spec.AttachTo != "" {
+ module, targetID, err := findProgramTargetInKernel(spec.AttachTo, spec.Type, spec.AttachType)
+ if err != nil && !errors.Is(err, errUnrecognizedAttachType) {
+ // We ignore errUnrecognizedAttachType since AttachTo may be non-empty
+ // for programs that don't attach anywhere.
+ return nil, fmt.Errorf("attach %s/%s: %w", spec.Type, spec.AttachType, err)
+ }
+
+ attr.AttachBtfId = targetID
+ if module != nil {
+ attr.AttachBtfObjFd = uint32(module.FD())
+ defer module.Close()
+ }
+ }
+
+ if opts.LogSize == 0 {
+ opts.LogSize = DefaultVerifierLogSize
+ }
+
+ // The caller requested a specific verifier log level. Set up the log buffer.
+ var logBuf []byte
+ if !opts.LogDisabled && opts.LogLevel != 0 {
+ logBuf = make([]byte, opts.LogSize)
+ attr.LogLevel = opts.LogLevel
+ attr.LogSize = uint32(len(logBuf))
+ attr.LogBuf = sys.NewSlicePointer(logBuf)
+ }
+
+ fd, err := sys.ProgLoad(attr)
+ if err == nil {
+ return &Program{unix.ByteSliceToString(logBuf), fd, spec.Name, "", spec.Type}, nil
+ }
+
+ // An error occurred loading the program, but the caller did not explicitly
+ // enable the verifier log. Re-run with branch-level verifier logs enabled to
+ // obtain more info. Preserve the original error to return it to the caller.
+ // An undersized log buffer will result in ENOSPC regardless of the underlying
+ // cause.
+ var err2 error
+ if !opts.LogDisabled && opts.LogLevel == 0 {
+ logBuf = make([]byte, opts.LogSize)
+ attr.LogLevel = LogLevelBranch
+ attr.LogSize = uint32(len(logBuf))
+ attr.LogBuf = sys.NewSlicePointer(logBuf)
+
+ _, err2 = sys.ProgLoad(attr)
+ }
+
+ switch {
+ case errors.Is(err, unix.EPERM):
+ if len(logBuf) > 0 && logBuf[0] == 0 {
+ // EPERM due to RLIMIT_MEMLOCK happens before the verifier, so we can
+ // check that the log is empty to reduce false positives.
+ return nil, fmt.Errorf("load program: %w (MEMLOCK may be too low, consider rlimit.RemoveMemlock)", err)
+ }
+
+ fallthrough
+
+ case errors.Is(err, unix.EINVAL):
+ if hasFunctionReferences(spec.Instructions) {
+ if err := haveBPFToBPFCalls(); err != nil {
+ return nil, fmt.Errorf("load program: %w", err)
+ }
+ }
+
+ if opts.LogSize > maxVerifierLogSize {
+ return nil, fmt.Errorf("load program: %w (ProgramOptions.LogSize exceeds maximum value of %d)", err, maxVerifierLogSize)
+ }
+ }
+
+ truncated := errors.Is(err, unix.ENOSPC) || errors.Is(err2, unix.ENOSPC)
+ return nil, internal.ErrorWithLog("load program", err, logBuf, truncated)
+}
+
+// NewProgramFromFD creates a program from a raw fd.
+//
+// You should not use fd after calling this function.
+//
+// Requires at least Linux 4.10.
+func NewProgramFromFD(fd int) (*Program, error) {
+ f, err := sys.NewFD(fd)
+ if err != nil {
+ return nil, err
+ }
+
+ return newProgramFromFD(f)
+}
+
+// NewProgramFromID returns the program for a given id.
+//
+// Returns ErrNotExist, if there is no eBPF program with the given id.
+func NewProgramFromID(id ProgramID) (*Program, error) {
+ fd, err := sys.ProgGetFdById(&sys.ProgGetFdByIdAttr{
+ Id: uint32(id),
+ })
+ if err != nil {
+ return nil, fmt.Errorf("get program by id: %w", err)
+ }
+
+ return newProgramFromFD(fd)
+}
+
+func newProgramFromFD(fd *sys.FD) (*Program, error) {
+ info, err := newProgramInfoFromFd(fd)
+ if err != nil {
+ fd.Close()
+ return nil, fmt.Errorf("discover program type: %w", err)
+ }
+
+ return &Program{"", fd, info.Name, "", info.Type}, nil
+}
+
+func (p *Program) String() string {
+ if p.name != "" {
+ return fmt.Sprintf("%s(%s)#%v", p.typ, p.name, p.fd)
+ }
+ return fmt.Sprintf("%s(%v)", p.typ, p.fd)
+}
+
+// Type returns the underlying type of the program.
+func (p *Program) Type() ProgramType {
+ return p.typ
+}
+
+// Info returns metadata about the program.
+//
+// Requires at least 4.10.
+func (p *Program) Info() (*ProgramInfo, error) {
+ return newProgramInfoFromFd(p.fd)
+}
+
+// Handle returns a reference to the program's type information in the kernel.
+//
+// Returns ErrNotSupported if the kernel has no BTF support, or if there is no
+// BTF associated with the program.
+func (p *Program) Handle() (*btf.Handle, error) {
+ info, err := p.Info()
+ if err != nil {
+ return nil, err
+ }
+
+ id, ok := info.BTFID()
+ if !ok {
+ return nil, fmt.Errorf("program %s: retrieve BTF ID: %w", p, ErrNotSupported)
+ }
+
+ return btf.NewHandleFromID(id)
+}
+
+// FD gets the file descriptor of the Program.
+//
+// It is invalid to call this function after Close has been called.
+func (p *Program) FD() int {
+ return p.fd.Int()
+}
+
+// Clone creates a duplicate of the Program.
+//
+// Closing the duplicate does not affect the original, and vice versa.
+//
+// Cloning a nil Program returns nil.
+func (p *Program) Clone() (*Program, error) {
+ if p == nil {
+ return nil, nil
+ }
+
+ dup, err := p.fd.Dup()
+ if err != nil {
+ return nil, fmt.Errorf("can't clone program: %w", err)
+ }
+
+ return &Program{p.VerifierLog, dup, p.name, "", p.typ}, nil
+}
+
+// Pin persists the Program on the BPF virtual file system past the lifetime of
+// the process that created it.
+//
+// Calling Pin on a previously pinned program will overwrite the path, except when
+// the new path already exists. Re-pinning across filesystems is not supported.
+//
+// This requires bpffs to be mounted above fileName.
+// See https://docs.cilium.io/en/stable/network/kubernetes/configuration/#mounting-bpffs-with-systemd
+func (p *Program) Pin(fileName string) error {
+ if err := internal.Pin(p.pinnedPath, fileName, p.fd); err != nil {
+ return err
+ }
+ p.pinnedPath = fileName
+ return nil
+}
+
+// Unpin removes the persisted state for the Program from the BPF virtual filesystem.
+//
+// Failed calls to Unpin will not alter the state returned by IsPinned.
+//
+// Unpinning an unpinned Program returns nil.
+func (p *Program) Unpin() error {
+ if err := internal.Unpin(p.pinnedPath); err != nil {
+ return err
+ }
+ p.pinnedPath = ""
+ return nil
+}
+
+// IsPinned returns true if the Program has a non-empty pinned path.
+func (p *Program) IsPinned() bool {
+ return p.pinnedPath != ""
+}
+
+// Close the Program's underlying file descriptor, which could unload
+// the program from the kernel if it is not pinned or attached to a
+// kernel hook.
+func (p *Program) Close() error {
+ if p == nil {
+ return nil
+ }
+
+ return p.fd.Close()
+}
+
+// Various options for Run'ing a Program
+type RunOptions struct {
+ // Program's data input. Required field.
+ //
+ // The kernel expects at least 14 bytes input for an ethernet header for
+ // XDP and SKB programs.
+ Data []byte
+ // Program's data after Program has run. Caller must allocate. Optional field.
+ DataOut []byte
+ // Program's context input. Optional field.
+ Context interface{}
+ // Program's context after Program has run. Must be a pointer or slice. Optional field.
+ ContextOut interface{}
+ // Minimum number of times to run Program. Optional field. Defaults to 1.
+ //
+ // The program may be executed more often than this due to interruptions, e.g.
+ // when runtime.AllThreadsSyscall is invoked.
+ Repeat uint32
+ // Optional flags.
+ Flags uint32
+ // CPU to run Program on. Optional field.
+ // Note not all program types support this field.
+ CPU uint32
+ // Called whenever the syscall is interrupted, and should be set to testing.B.ResetTimer
+ // or similar. Typically used during benchmarking. Optional field.
+ //
+ // Deprecated: use [testing.B.ReportMetric] with unit "ns/op" instead.
+ Reset func()
+}
+
+// Test runs the Program in the kernel with the given input and returns the
+// value returned by the eBPF program. outLen may be zero.
+//
+// Note: the kernel expects at least 14 bytes input for an ethernet header for
+// XDP and SKB programs.
+//
+// This function requires at least Linux 4.12.
+func (p *Program) Test(in []byte) (uint32, []byte, error) {
+ // Older kernels ignore the dataSizeOut argument when copying to user space.
+ // Combined with things like bpf_xdp_adjust_head() we don't really know what the final
+ // size will be. Hence we allocate an output buffer which we hope will always be large
+ // enough, and panic if the kernel wrote past the end of the allocation.
+ // See https://patchwork.ozlabs.org/cover/1006822/
+ var out []byte
+ if len(in) > 0 {
+ out = make([]byte, len(in)+outputPad)
+ }
+
+ opts := RunOptions{
+ Data: in,
+ DataOut: out,
+ Repeat: 1,
+ }
+
+ ret, _, err := p.run(&opts)
+ if err != nil {
+ return ret, nil, fmt.Errorf("test program: %w", err)
+ }
+ return ret, opts.DataOut, nil
+}
+
+// Run runs the Program in kernel with given RunOptions.
+//
+// Note: the same restrictions from Test apply.
+func (p *Program) Run(opts *RunOptions) (uint32, error) {
+ ret, _, err := p.run(opts)
+ if err != nil {
+ return ret, fmt.Errorf("run program: %w", err)
+ }
+ return ret, nil
+}
+
+// Benchmark runs the Program with the given input for a number of times
+// and returns the time taken per iteration.
+//
+// Returns the result of the last execution of the program and the time per
+// run or an error. reset is called whenever the benchmark syscall is
+// interrupted, and should be set to testing.B.ResetTimer or similar.
+//
+// This function requires at least Linux 4.12.
+func (p *Program) Benchmark(in []byte, repeat int, reset func()) (uint32, time.Duration, error) {
+ if uint(repeat) > math.MaxUint32 {
+ return 0, 0, fmt.Errorf("repeat is too high")
+ }
+
+ opts := RunOptions{
+ Data: in,
+ Repeat: uint32(repeat),
+ Reset: reset,
+ }
+
+ ret, total, err := p.run(&opts)
+ if err != nil {
+ return ret, total, fmt.Errorf("benchmark program: %w", err)
+ }
+ return ret, total, nil
+}
+
+var haveProgRun = internal.NewFeatureTest("BPF_PROG_RUN", "4.12", func() error {
+ prog, err := NewProgram(&ProgramSpec{
+ // SocketFilter does not require privileges on newer kernels.
+ Type: SocketFilter,
+ Instructions: asm.Instructions{
+ asm.LoadImm(asm.R0, 0, asm.DWord),
+ asm.Return(),
+ },
+ License: "MIT",
+ })
+ if err != nil {
+ // This may be because we lack sufficient permissions, etc.
+ return err
+ }
+ defer prog.Close()
+
+ in := internal.EmptyBPFContext
+ attr := sys.ProgRunAttr{
+ ProgFd: uint32(prog.FD()),
+ DataSizeIn: uint32(len(in)),
+ DataIn: sys.NewSlicePointer(in),
+ }
+
+ err = sys.ProgRun(&attr)
+ switch {
+ case errors.Is(err, unix.EINVAL):
+ // Check for EINVAL specifically, rather than err != nil since we
+ // otherwise misdetect due to insufficient permissions.
+ return internal.ErrNotSupported
+
+ case errors.Is(err, unix.EINTR):
+ // We know that PROG_TEST_RUN is supported if we get EINTR.
+ return nil
+
+ case errors.Is(err, sys.ENOTSUPP):
+ // The first PROG_TEST_RUN patches shipped in 4.12 didn't include
+ // a test runner for SocketFilter. ENOTSUPP means PROG_TEST_RUN is
+ // supported, but not for the program type used in the probe.
+ return nil
+ }
+
+ return err
+})
+
+func (p *Program) run(opts *RunOptions) (uint32, time.Duration, error) {
+ if uint(len(opts.Data)) > math.MaxUint32 {
+ return 0, 0, fmt.Errorf("input is too long")
+ }
+
+ if err := haveProgRun(); err != nil {
+ return 0, 0, err
+ }
+
+ var ctxBytes []byte
+ if opts.Context != nil {
+ ctx := new(bytes.Buffer)
+ if err := binary.Write(ctx, internal.NativeEndian, opts.Context); err != nil {
+ return 0, 0, fmt.Errorf("cannot serialize context: %v", err)
+ }
+ ctxBytes = ctx.Bytes()
+ }
+
+ var ctxOut []byte
+ if opts.ContextOut != nil {
+ ctxOut = make([]byte, binary.Size(opts.ContextOut))
+ }
+
+ attr := sys.ProgRunAttr{
+ ProgFd: p.fd.Uint(),
+ DataSizeIn: uint32(len(opts.Data)),
+ DataSizeOut: uint32(len(opts.DataOut)),
+ DataIn: sys.NewSlicePointer(opts.Data),
+ DataOut: sys.NewSlicePointer(opts.DataOut),
+ Repeat: uint32(opts.Repeat),
+ CtxSizeIn: uint32(len(ctxBytes)),
+ CtxSizeOut: uint32(len(ctxOut)),
+ CtxIn: sys.NewSlicePointer(ctxBytes),
+ CtxOut: sys.NewSlicePointer(ctxOut),
+ Flags: opts.Flags,
+ Cpu: opts.CPU,
+ }
+
+ if attr.Repeat == 0 {
+ attr.Repeat = 1
+ }
+
+retry:
+ for {
+ err := sys.ProgRun(&attr)
+ if err == nil {
+ break retry
+ }
+
+ if errors.Is(err, unix.EINTR) {
+ if attr.Repeat == 1 {
+ // Older kernels check whether enough repetitions have been
+ // executed only after checking for pending signals.
+ //
+ // run signal? done? run ...
+ //
+ // As a result we can get EINTR for repeat==1 even though
+ // the program was run exactly once. Treat this as a
+ // successful run instead.
+ //
+ // Since commit 607b9cc92bd7 ("bpf: Consolidate shared test timing code")
+ // the conditions are reversed:
+ // run done? signal? ...
+ break retry
+ }
+
+ if opts.Reset != nil {
+ opts.Reset()
+ }
+ continue retry
+ }
+
+ if errors.Is(err, sys.ENOTSUPP) {
+ return 0, 0, fmt.Errorf("kernel doesn't support running %s: %w", p.Type(), ErrNotSupported)
+ }
+
+ return 0, 0, err
+ }
+
+ if opts.DataOut != nil {
+ if int(attr.DataSizeOut) > cap(opts.DataOut) {
+ // Houston, we have a problem. The program created more data than we allocated,
+ // and the kernel wrote past the end of our buffer.
+ panic("kernel wrote past end of output buffer")
+ }
+ opts.DataOut = opts.DataOut[:int(attr.DataSizeOut)]
+ }
+
+ if len(ctxOut) != 0 {
+ b := bytes.NewReader(ctxOut)
+ if err := binary.Read(b, internal.NativeEndian, opts.ContextOut); err != nil {
+ return 0, 0, fmt.Errorf("failed to decode ContextOut: %v", err)
+ }
+ }
+
+ total := time.Duration(attr.Duration) * time.Nanosecond
+ return attr.Retval, total, nil
+}
+
+func unmarshalProgram(buf sysenc.Buffer) (*Program, error) {
+ var id uint32
+ if err := buf.Unmarshal(&id); err != nil {
+ return nil, err
+ }
+
+ // Looking up an entry in a nested map or prog array returns an id,
+ // not an fd.
+ return NewProgramFromID(ProgramID(id))
+}
+
+func marshalProgram(p *Program, length int) ([]byte, error) {
+ if length != 4 {
+ return nil, fmt.Errorf("can't marshal program to %d bytes", length)
+ }
+
+ buf := make([]byte, 4)
+ internal.NativeEndian.PutUint32(buf, p.fd.Uint())
+ return buf, nil
+}
+
+// LoadPinnedProgram loads a Program from a BPF file.
+//
+// Requires at least Linux 4.11.
+func LoadPinnedProgram(fileName string, opts *LoadPinOptions) (*Program, error) {
+ fd, err := sys.ObjGet(&sys.ObjGetAttr{
+ Pathname: sys.NewStringPointer(fileName),
+ FileFlags: opts.Marshal(),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ info, err := newProgramInfoFromFd(fd)
+ if err != nil {
+ _ = fd.Close()
+ return nil, fmt.Errorf("info for %s: %w", fileName, err)
+ }
+
+ var progName string
+ if haveObjName() == nil {
+ progName = info.Name
+ } else {
+ progName = filepath.Base(fileName)
+ }
+
+ return &Program{"", fd, progName, fileName, info.Type}, nil
+}
+
+// SanitizeName replaces all invalid characters in name with replacement.
+// Passing a negative value for replacement will delete characters instead
+// of replacing them. Use this to automatically generate valid names for maps
+// and programs at runtime.
+//
+// The set of allowed characters depends on the running kernel version.
+// Dots are only allowed as of kernel 5.2.
+func SanitizeName(name string, replacement rune) string {
+ return strings.Map(func(char rune) rune {
+ if invalidBPFObjNameChar(char) {
+ return replacement
+ }
+ return char
+ }, name)
+}
+
+// ProgramGetNextID returns the ID of the next eBPF program.
+//
+// Returns ErrNotExist, if there is no next eBPF program.
+func ProgramGetNextID(startID ProgramID) (ProgramID, error) {
+ attr := &sys.ProgGetNextIdAttr{Id: uint32(startID)}
+ return ProgramID(attr.NextId), sys.ProgGetNextId(attr)
+}
+
+// BindMap binds map to the program and is only released once program is released.
+//
+// This may be used in cases where metadata should be associated with the program
+// which otherwise does not contain any references to the map.
+func (p *Program) BindMap(m *Map) error {
+ attr := &sys.ProgBindMapAttr{
+ ProgFd: uint32(p.FD()),
+ MapFd: uint32(m.FD()),
+ }
+
+ return sys.ProgBindMap(attr)
+}
+
+var errUnrecognizedAttachType = errors.New("unrecognized attach type")
+
+// find an attach target type in the kernel.
+//
+// name, progType and attachType determine which type we need to attach to.
+//
+// The attach target may be in a loaded kernel module.
+// In that case the returned handle will be non-nil.
+// The caller is responsible for closing the handle.
+//
+// Returns errUnrecognizedAttachType if the combination of progType and attachType
+// is not recognised.
+func findProgramTargetInKernel(name string, progType ProgramType, attachType AttachType) (*btf.Handle, btf.TypeID, error) {
+ type match struct {
+ p ProgramType
+ a AttachType
+ }
+
+ var (
+ typeName, featureName string
+ target btf.Type
+ )
+
+ switch (match{progType, attachType}) {
+ case match{LSM, AttachLSMMac}:
+ typeName = "bpf_lsm_" + name
+ featureName = name + " LSM hook"
+ target = (*btf.Func)(nil)
+ case match{Tracing, AttachTraceIter}:
+ typeName = "bpf_iter_" + name
+ featureName = name + " iterator"
+ target = (*btf.Func)(nil)
+ case match{Tracing, AttachTraceFEntry}:
+ typeName = name
+ featureName = fmt.Sprintf("fentry %s", name)
+ target = (*btf.Func)(nil)
+ case match{Tracing, AttachTraceFExit}:
+ typeName = name
+ featureName = fmt.Sprintf("fexit %s", name)
+ target = (*btf.Func)(nil)
+ case match{Tracing, AttachModifyReturn}:
+ typeName = name
+ featureName = fmt.Sprintf("fmod_ret %s", name)
+ target = (*btf.Func)(nil)
+ case match{Tracing, AttachTraceRawTp}:
+ typeName = fmt.Sprintf("btf_trace_%s", name)
+ featureName = fmt.Sprintf("raw_tp %s", name)
+ target = (*btf.Typedef)(nil)
+ default:
+ return nil, 0, errUnrecognizedAttachType
+ }
+
+ spec, err := btf.LoadKernelSpec()
+ if err != nil {
+ return nil, 0, fmt.Errorf("load kernel spec: %w", err)
+ }
+
+ spec, module, err := findTargetInKernel(spec, typeName, &target)
+ if errors.Is(err, btf.ErrNotFound) {
+ return nil, 0, &internal.UnsupportedFeatureError{Name: featureName}
+ }
+ // See cilium/ebpf#894. Until we can disambiguate between equally-named kernel
+ // symbols, we should explicitly refuse program loads. They will not reliably
+ // do what the caller intended.
+ if errors.Is(err, btf.ErrMultipleMatches) {
+ return nil, 0, fmt.Errorf("attaching to ambiguous kernel symbol is not supported: %w", err)
+ }
+ if err != nil {
+ return nil, 0, fmt.Errorf("find target for %s: %w", featureName, err)
+ }
+
+ id, err := spec.TypeID(target)
+ if err != nil {
+ module.Close()
+ return nil, 0, err
+ }
+
+ return module, id, nil
+}
+
+// findTargetInKernel attempts to find a named type in the current kernel.
+//
+// target will point at the found type after a successful call. Searches both
+// vmlinux and any loaded modules.
+//
+// Returns a non-nil handle if the type was found in a module, or btf.ErrNotFound
+// if the type wasn't found at all.
+func findTargetInKernel(kernelSpec *btf.Spec, typeName string, target *btf.Type) (*btf.Spec, *btf.Handle, error) {
+ err := kernelSpec.TypeByName(typeName, target)
+ if errors.Is(err, btf.ErrNotFound) {
+ spec, module, err := findTargetInModule(kernelSpec, typeName, target)
+ if err != nil {
+ return nil, nil, fmt.Errorf("find target in modules: %w", err)
+ }
+ return spec, module, nil
+ }
+ if err != nil {
+ return nil, nil, fmt.Errorf("find target in vmlinux: %w", err)
+ }
+ return kernelSpec, nil, err
+}
+
+// findTargetInModule attempts to find a named type in any loaded module.
+//
+// base must contain the kernel's types and is used to parse kmod BTF. Modules
+// are searched in the order they were loaded.
+//
+// Returns btf.ErrNotFound if the target can't be found in any module.
+func findTargetInModule(base *btf.Spec, typeName string, target *btf.Type) (*btf.Spec, *btf.Handle, error) {
+ it := new(btf.HandleIterator)
+ defer it.Handle.Close()
+
+ for it.Next() {
+ info, err := it.Handle.Info()
+ if err != nil {
+ return nil, nil, fmt.Errorf("get info for BTF ID %d: %w", it.ID, err)
+ }
+
+ if !info.IsModule() {
+ continue
+ }
+
+ spec, err := it.Handle.Spec(base)
+ if err != nil {
+ return nil, nil, fmt.Errorf("parse types for module %s: %w", info.Name, err)
+ }
+
+ err = spec.TypeByName(typeName, target)
+ if errors.Is(err, btf.ErrNotFound) {
+ continue
+ }
+ if err != nil {
+ return nil, nil, fmt.Errorf("lookup type in module %s: %w", info.Name, err)
+ }
+
+ return spec, it.Take(), nil
+ }
+ if err := it.Err(); err != nil {
+ return nil, nil, fmt.Errorf("iterate modules: %w", err)
+ }
+
+ return nil, nil, btf.ErrNotFound
+}
+
+// find an attach target type in a program.
+//
+// Returns errUnrecognizedAttachType.
+func findTargetInProgram(prog *Program, name string, progType ProgramType, attachType AttachType) (btf.TypeID, error) {
+ type match struct {
+ p ProgramType
+ a AttachType
+ }
+
+ var typeName string
+ switch (match{progType, attachType}) {
+ case match{Extension, AttachNone},
+ match{Tracing, AttachTraceFEntry},
+ match{Tracing, AttachTraceFExit}:
+ typeName = name
+ default:
+ return 0, errUnrecognizedAttachType
+ }
+
+ btfHandle, err := prog.Handle()
+ if err != nil {
+ return 0, fmt.Errorf("load target BTF: %w", err)
+ }
+ defer btfHandle.Close()
+
+ spec, err := btfHandle.Spec(nil)
+ if err != nil {
+ return 0, err
+ }
+
+ var targetFunc *btf.Func
+ err = spec.TypeByName(typeName, &targetFunc)
+ if err != nil {
+ return 0, fmt.Errorf("find target %s: %w", typeName, err)
+ }
+
+ return spec.TypeID(targetFunc)
+}
diff --git a/vendor/github.com/cilium/ebpf/rlimit/rlimit.go b/vendor/github.com/cilium/ebpf/rlimit/rlimit.go
new file mode 100644
index 000000000..2a6973744
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/rlimit/rlimit.go
@@ -0,0 +1,123 @@
+// Package rlimit allows raising RLIMIT_MEMLOCK if necessary for the use of BPF.
+package rlimit
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+var (
+ unsupportedMemcgAccounting = &internal.UnsupportedFeatureError{
+ MinimumVersion: internal.Version{5, 11, 0},
+ Name: "memcg-based accounting for BPF memory",
+ }
+ haveMemcgAccounting error
+
+ rlimitMu sync.Mutex
+)
+
+func init() {
+ // We have to run this feature test at init, since it relies on changing
+ // RLIMIT_MEMLOCK. Doing so is not safe in a concurrent program. Instead,
+ // we rely on the initialization order guaranteed by the Go runtime to
+ // execute the test in a safe environment:
+ //
+ // the invocation of init functions happens in a single goroutine,
+ // sequentially, one package at a time.
+ //
+ // This is also the reason why RemoveMemlock is in its own package:
+ // we only want to run the initializer if RemoveMemlock is called
+ // from somewhere.
+ haveMemcgAccounting = detectMemcgAccounting()
+}
+
+func detectMemcgAccounting() error {
+ // Retrieve the original limit to prevent lowering Max, since
+ // doing so is a permanent operation when running unprivileged.
+ var oldLimit unix.Rlimit
+ if err := unix.Prlimit(0, unix.RLIMIT_MEMLOCK, nil, &oldLimit); err != nil {
+ return fmt.Errorf("getting original memlock rlimit: %s", err)
+ }
+
+ // Drop the current limit to zero, maintaining the old Max value.
+ // This is always permitted by the kernel for unprivileged users.
+ // Retrieve a new copy of the old limit tuple to minimize the chances
+ // of failing the restore operation below.
+ zeroLimit := unix.Rlimit{Cur: 0, Max: oldLimit.Max}
+ if err := unix.Prlimit(0, unix.RLIMIT_MEMLOCK, &zeroLimit, &oldLimit); err != nil {
+ return fmt.Errorf("lowering memlock rlimit: %s", err)
+ }
+
+ attr := sys.MapCreateAttr{
+ MapType: 2, /* Array */
+ KeySize: 4,
+ ValueSize: 4,
+ MaxEntries: 1,
+ }
+
+ // Creating a map allocates shared (and locked) memory that counts against
+ // the rlimit on pre-5.11 kernels, but against the memory cgroup budget on
+ // kernels 5.11 and over. If this call succeeds with the process' memlock
+ // rlimit set to 0, we can reasonably assume memcg accounting is supported.
+ fd, mapErr := sys.MapCreate(&attr)
+
+ // Restore old limits regardless of what happened.
+ if err := unix.Prlimit(0, unix.RLIMIT_MEMLOCK, &oldLimit, nil); err != nil {
+ return fmt.Errorf("restoring old memlock rlimit: %s", err)
+ }
+
+ // Map creation successful, memcg accounting supported.
+ if mapErr == nil {
+ fd.Close()
+ return nil
+ }
+
+ // EPERM shows up when map creation would exceed the memory budget.
+ if errors.Is(mapErr, unix.EPERM) {
+ return unsupportedMemcgAccounting
+ }
+
+ // This shouldn't happen really.
+ return fmt.Errorf("unexpected error detecting memory cgroup accounting: %s", mapErr)
+}
+
+// RemoveMemlock removes the limit on the amount of memory the current
+// process can lock into RAM, if necessary.
+//
+// This is not required to load eBPF resources on kernel versions 5.11+
+// due to the introduction of cgroup-based memory accounting. On such kernels
+// the function is a no-op.
+//
+// Since the function may change global per-process limits it should be invoked
+// at program start up, in main() or init().
+//
+// This function exists as a convenience and should only be used when
+// permanently raising RLIMIT_MEMLOCK to infinite is appropriate. Consider
+// invoking prlimit(2) directly with a more reasonable limit if desired.
+//
+// Requires CAP_SYS_RESOURCE on kernels < 5.11.
+func RemoveMemlock() error {
+ if haveMemcgAccounting == nil {
+ return nil
+ }
+
+ if !errors.Is(haveMemcgAccounting, unsupportedMemcgAccounting) {
+ return haveMemcgAccounting
+ }
+
+ rlimitMu.Lock()
+ defer rlimitMu.Unlock()
+
+ // pid 0 affects the current process. Requires CAP_SYS_RESOURCE.
+ newLimit := unix.Rlimit{Cur: unix.RLIM_INFINITY, Max: unix.RLIM_INFINITY}
+ if err := unix.Prlimit(0, unix.RLIMIT_MEMLOCK, &newLimit, nil); err != nil {
+ return fmt.Errorf("failed to set memlock rlimit: %w", err)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cilium/ebpf/run-tests.sh b/vendor/github.com/cilium/ebpf/run-tests.sh
new file mode 100644
index 000000000..629a069dd
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/run-tests.sh
@@ -0,0 +1,178 @@
+#!/usr/bin/env bash
+# Test the current package under a different kernel.
+# Requires virtme and qemu to be installed.
+# Examples:
+# Run all tests on a 5.4 kernel
+# $ ./run-tests.sh 5.4
+# Run a subset of tests:
+# $ ./run-tests.sh 5.4 ./link
+# Run using a local kernel image
+# $ ./run-tests.sh /path/to/bzImage
+
+set -euo pipefail
+
+script="$(realpath "$0")"
+readonly script
+
+quote_env() {
+ for var in "$@"; do
+ if [ -v "$var" ]; then
+ printf "%s=%q " "$var" "${!var}"
+ fi
+ done
+}
+
+declare -a preserved_env=(
+ PATH
+ CI_MAX_KERNEL_VERSION
+ TEST_SEED
+ KERNEL_VERSION
+)
+
+# This script is a bit like a Matryoshka doll since it keeps re-executing itself
+# in various different contexts:
+#
+# 1. invoked by the user like run-tests.sh 5.4
+# 2. invoked by go test like run-tests.sh --exec-vm
+# 3. invoked by init in the vm like run-tests.sh --exec-test
+#
+# This allows us to use all available CPU on the host machine to compile our
+# code, and then only use the VM to execute the test. This is because the VM
+# is usually slower at compiling than the host.
+if [[ "${1:-}" = "--exec-vm" ]]; then
+ shift
+
+ input="$1"
+ shift
+
+ # Use sudo if /dev/kvm isn't accessible by the current user.
+ sudo=""
+ if [[ ! -r /dev/kvm || ! -w /dev/kvm ]]; then
+ sudo="sudo"
+ fi
+ readonly sudo
+
+ testdir="$(dirname "$1")"
+ output="$(mktemp -d)"
+ printf -v cmd "%q " "$@"
+
+ if [[ "$(stat -c '%t:%T' -L /proc/$$/fd/0)" == "1:3" ]]; then
+ # stdin is /dev/null, which doesn't play well with qemu. Use a fifo as a
+ # blocking substitute.
+ mkfifo "${output}/fake-stdin"
+ # Open for reading and writing to avoid blocking.
+ exec 0<> "${output}/fake-stdin"
+ rm "${output}/fake-stdin"
+ fi
+
+ for ((i = 0; i < 3; i++)); do
+ if ! $sudo virtme-run --kimg "${input}/boot/vmlinuz" --memory 768M --pwd \
+ --rwdir="${testdir}=${testdir}" \
+ --rodir=/run/input="${input}" \
+ --rwdir=/run/output="${output}" \
+ --script-sh "$(quote_env "${preserved_env[@]}") \"$script\" --exec-test $cmd" \
+ --kopt possible_cpus=2; then # need at least two CPUs for some tests
+ exit 23
+ fi
+
+ if [[ -e "${output}/status" ]]; then
+ break
+ fi
+
+ if [[ -v CI ]]; then
+ echo "Retrying test run due to qemu crash"
+ continue
+ fi
+
+ exit 42
+ done
+
+ rc=$(<"${output}/status")
+ $sudo rm -r "$output"
+ exit $rc
+elif [[ "${1:-}" = "--exec-test" ]]; then
+ shift
+
+ mount -t bpf bpf /sys/fs/bpf
+ mount -t tracefs tracefs /sys/kernel/debug/tracing
+
+ if [[ -d "/run/input/bpf" ]]; then
+ export KERNEL_SELFTESTS="/run/input/bpf"
+ fi
+
+ if [[ -d "/run/input/lib/modules" ]]; then
+ find /run/input/lib/modules -type f -name bpf_testmod.ko -exec insmod {} \;
+ fi
+
+ dmesg --clear
+ rc=0
+ "$@" || rc=$?
+ dmesg
+ echo $rc > "/run/output/status"
+ exit $rc # this return code is "swallowed" by qemu
+fi
+
+if [[ -z "${1:-}" ]]; then
+ echo "Expecting kernel version or path as first argument"
+ exit 1
+fi
+
+readonly input="$(mktemp -d)"
+readonly tmp_dir="${TMPDIR:-/tmp}"
+
+fetch() {
+ echo Fetching "${1}"
+ pushd "${tmp_dir}" > /dev/null
+ curl --no-progress-meter -L -O --fail --etag-compare "${1}.etag" --etag-save "${1}.etag" "https://github.com/cilium/ci-kernels/raw/${BRANCH:-master}/${1}"
+ local ret=$?
+ popd > /dev/null
+ return $ret
+}
+
+machine="$(uname -m)"
+readonly machine
+
+if [[ -f "${1}" ]]; then
+ readonly kernel="${1}"
+ cp "${1}" "${input}/bzImage"
+else
+# LINUX_VERSION_CODE test compares this to discovered value.
+ export KERNEL_VERSION="${1}"
+
+ if [ "${machine}" = "x86_64" ]; then
+ readonly kernel="linux-${1}-amd64.tgz"
+ readonly selftests="linux-${1}-amd64-selftests-bpf.tgz"
+ elif [ "${machine}" = "aarch64" ]; then
+ readonly kernel="linux-${1}-arm64.tgz"
+ readonly selftests=""
+ else
+ echo "Arch ${machine} is not supported"
+ exit 1
+ fi
+
+ fetch "${kernel}"
+ tar xf "${tmp_dir}/${kernel}" -C "${input}"
+
+ if [ -n "${selftests}" ] && fetch "${selftests}"; then
+ echo "Decompressing selftests"
+ mkdir "${input}/bpf"
+ tar --strip-components=5 -xf "${tmp_dir}/${selftests}" -C "${input}/bpf"
+ else
+ echo "No selftests found, disabling"
+ fi
+fi
+shift
+
+args=(-short -coverpkg=./... -coverprofile=coverage.out -count 1 ./...)
+if (( $# > 0 )); then
+ args=("$@")
+fi
+
+export GOFLAGS=-mod=readonly
+export CGO_ENABLED=0
+
+echo Testing on "${kernel}"
+go test -exec "$script --exec-vm $input" "${args[@]}"
+echo "Test successful on ${kernel}"
+
+rm -r "${input}"
diff --git a/vendor/github.com/cilium/ebpf/syscalls.go b/vendor/github.com/cilium/ebpf/syscalls.go
new file mode 100644
index 000000000..cdf1fcf2e
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/syscalls.go
@@ -0,0 +1,304 @@
+package ebpf
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "os"
+ "runtime"
+
+ "github.com/cilium/ebpf/asm"
+ "github.com/cilium/ebpf/internal"
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/tracefs"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+var (
+ // pre-allocating these here since they may
+ // get called in hot code paths and cause
+ // unnecessary memory allocations
+ sysErrKeyNotExist = sys.Error(ErrKeyNotExist, unix.ENOENT)
+ sysErrKeyExist = sys.Error(ErrKeyExist, unix.EEXIST)
+ sysErrNotSupported = sys.Error(ErrNotSupported, sys.ENOTSUPP)
+)
+
+// invalidBPFObjNameChar returns true if char may not appear in
+// a BPF object name.
+func invalidBPFObjNameChar(char rune) bool {
+ dotAllowed := objNameAllowsDot() == nil
+
+ switch {
+ case char >= 'A' && char <= 'Z':
+ return false
+ case char >= 'a' && char <= 'z':
+ return false
+ case char >= '0' && char <= '9':
+ return false
+ case dotAllowed && char == '.':
+ return false
+ case char == '_':
+ return false
+ default:
+ return true
+ }
+}
+
+func progLoad(insns asm.Instructions, typ ProgramType, license string) (*sys.FD, error) {
+ buf := bytes.NewBuffer(make([]byte, 0, insns.Size()))
+ if err := insns.Marshal(buf, internal.NativeEndian); err != nil {
+ return nil, err
+ }
+ bytecode := buf.Bytes()
+
+ return sys.ProgLoad(&sys.ProgLoadAttr{
+ ProgType: sys.ProgType(typ),
+ License: sys.NewStringPointer(license),
+ Insns: sys.NewSlicePointer(bytecode),
+ InsnCnt: uint32(len(bytecode) / asm.InstructionSize),
+ })
+}
+
+var haveNestedMaps = internal.NewFeatureTest("nested maps", "4.12", func() error {
+ _, err := sys.MapCreate(&sys.MapCreateAttr{
+ MapType: sys.MapType(ArrayOfMaps),
+ KeySize: 4,
+ ValueSize: 4,
+ MaxEntries: 1,
+ // Invalid file descriptor.
+ InnerMapFd: ^uint32(0),
+ })
+ if errors.Is(err, unix.EINVAL) {
+ return internal.ErrNotSupported
+ }
+ if errors.Is(err, unix.EBADF) {
+ return nil
+ }
+ return err
+})
+
+var haveMapMutabilityModifiers = internal.NewFeatureTest("read- and write-only maps", "5.2", func() error {
+ // This checks BPF_F_RDONLY_PROG and BPF_F_WRONLY_PROG. Since
+ // BPF_MAP_FREEZE appeared in 5.2 as well we don't do a separate check.
+ m, err := sys.MapCreate(&sys.MapCreateAttr{
+ MapType: sys.MapType(Array),
+ KeySize: 4,
+ ValueSize: 4,
+ MaxEntries: 1,
+ MapFlags: unix.BPF_F_RDONLY_PROG,
+ })
+ if err != nil {
+ return internal.ErrNotSupported
+ }
+ _ = m.Close()
+ return nil
+})
+
+var haveMmapableMaps = internal.NewFeatureTest("mmapable maps", "5.5", func() error {
+ // This checks BPF_F_MMAPABLE, which appeared in 5.5 for array maps.
+ m, err := sys.MapCreate(&sys.MapCreateAttr{
+ MapType: sys.MapType(Array),
+ KeySize: 4,
+ ValueSize: 4,
+ MaxEntries: 1,
+ MapFlags: unix.BPF_F_MMAPABLE,
+ })
+ if err != nil {
+ return internal.ErrNotSupported
+ }
+ _ = m.Close()
+ return nil
+})
+
+var haveInnerMaps = internal.NewFeatureTest("inner maps", "5.10", func() error {
+ // This checks BPF_F_INNER_MAP, which appeared in 5.10.
+ m, err := sys.MapCreate(&sys.MapCreateAttr{
+ MapType: sys.MapType(Array),
+ KeySize: 4,
+ ValueSize: 4,
+ MaxEntries: 1,
+ MapFlags: unix.BPF_F_INNER_MAP,
+ })
+
+ if err != nil {
+ return internal.ErrNotSupported
+ }
+ _ = m.Close()
+ return nil
+})
+
+var haveNoPreallocMaps = internal.NewFeatureTest("prealloc maps", "4.6", func() error {
+ // This checks BPF_F_NO_PREALLOC, which appeared in 4.6.
+ m, err := sys.MapCreate(&sys.MapCreateAttr{
+ MapType: sys.MapType(Hash),
+ KeySize: 4,
+ ValueSize: 4,
+ MaxEntries: 1,
+ MapFlags: unix.BPF_F_NO_PREALLOC,
+ })
+
+ if err != nil {
+ return internal.ErrNotSupported
+ }
+ _ = m.Close()
+ return nil
+})
+
+func wrapMapError(err error) error {
+ if err == nil {
+ return nil
+ }
+
+ if errors.Is(err, unix.ENOENT) {
+ return sysErrKeyNotExist
+ }
+
+ if errors.Is(err, unix.EEXIST) {
+ return sysErrKeyExist
+ }
+
+ if errors.Is(err, sys.ENOTSUPP) {
+ return sysErrNotSupported
+ }
+
+ if errors.Is(err, unix.E2BIG) {
+ return fmt.Errorf("key too big for map: %w", err)
+ }
+
+ return err
+}
+
+var haveObjName = internal.NewFeatureTest("object names", "4.15", func() error {
+ attr := sys.MapCreateAttr{
+ MapType: sys.MapType(Array),
+ KeySize: 4,
+ ValueSize: 4,
+ MaxEntries: 1,
+ MapName: sys.NewObjName("feature_test"),
+ }
+
+ fd, err := sys.MapCreate(&attr)
+ if err != nil {
+ return internal.ErrNotSupported
+ }
+
+ _ = fd.Close()
+ return nil
+})
+
+var objNameAllowsDot = internal.NewFeatureTest("dot in object names", "5.2", func() error {
+ if err := haveObjName(); err != nil {
+ return err
+ }
+
+ attr := sys.MapCreateAttr{
+ MapType: sys.MapType(Array),
+ KeySize: 4,
+ ValueSize: 4,
+ MaxEntries: 1,
+ MapName: sys.NewObjName(".test"),
+ }
+
+ fd, err := sys.MapCreate(&attr)
+ if err != nil {
+ return internal.ErrNotSupported
+ }
+
+ _ = fd.Close()
+ return nil
+})
+
+var haveBatchAPI = internal.NewFeatureTest("map batch api", "5.6", func() error {
+ var maxEntries uint32 = 2
+ attr := sys.MapCreateAttr{
+ MapType: sys.MapType(Hash),
+ KeySize: 4,
+ ValueSize: 4,
+ MaxEntries: maxEntries,
+ }
+
+ fd, err := sys.MapCreate(&attr)
+ if err != nil {
+ return internal.ErrNotSupported
+ }
+ defer fd.Close()
+
+ keys := []uint32{1, 2}
+ values := []uint32{3, 4}
+ kp, _ := marshalMapSyscallInput(keys, 8)
+ vp, _ := marshalMapSyscallInput(values, 8)
+
+ err = sys.MapUpdateBatch(&sys.MapUpdateBatchAttr{
+ MapFd: fd.Uint(),
+ Keys: kp,
+ Values: vp,
+ Count: maxEntries,
+ })
+ if err != nil {
+ return internal.ErrNotSupported
+ }
+ return nil
+})
+
+var haveProbeReadKernel = internal.NewFeatureTest("bpf_probe_read_kernel", "5.5", func() error {
+ insns := asm.Instructions{
+ asm.Mov.Reg(asm.R1, asm.R10),
+ asm.Add.Imm(asm.R1, -8),
+ asm.Mov.Imm(asm.R2, 8),
+ asm.Mov.Imm(asm.R3, 0),
+ asm.FnProbeReadKernel.Call(),
+ asm.Return(),
+ }
+
+ fd, err := progLoad(insns, Kprobe, "GPL")
+ if err != nil {
+ return internal.ErrNotSupported
+ }
+ _ = fd.Close()
+ return nil
+})
+
+var haveBPFToBPFCalls = internal.NewFeatureTest("bpf2bpf calls", "4.16", func() error {
+ insns := asm.Instructions{
+ asm.Call.Label("prog2").WithSymbol("prog1"),
+ asm.Return(),
+ asm.Mov.Imm(asm.R0, 0).WithSymbol("prog2"),
+ asm.Return(),
+ }
+
+ fd, err := progLoad(insns, SocketFilter, "MIT")
+ if err != nil {
+ return internal.ErrNotSupported
+ }
+ _ = fd.Close()
+ return nil
+})
+
+var haveSyscallWrapper = internal.NewFeatureTest("syscall wrapper", "4.17", func() error {
+ prefix := internal.PlatformPrefix()
+ if prefix == "" {
+ return fmt.Errorf("unable to find the platform prefix for (%s)", runtime.GOARCH)
+ }
+
+ args := tracefs.ProbeArgs{
+ Type: tracefs.Kprobe,
+ Symbol: prefix + "sys_bpf",
+ Pid: -1,
+ }
+
+ var err error
+ args.Group, err = tracefs.RandomGroup("ebpf_probe")
+ if err != nil {
+ return err
+ }
+
+ evt, err := tracefs.NewEvent(args)
+ if errors.Is(err, os.ErrNotExist) {
+ return internal.ErrNotSupported
+ }
+ if err != nil {
+ return err
+ }
+
+ return evt.Close()
+})
diff --git a/vendor/github.com/cilium/ebpf/types.go b/vendor/github.com/cilium/ebpf/types.go
new file mode 100644
index 000000000..af3651999
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/types.go
@@ -0,0 +1,285 @@
+package ebpf
+
+import (
+ "github.com/cilium/ebpf/internal/sys"
+ "github.com/cilium/ebpf/internal/unix"
+)
+
+//go:generate go run golang.org/x/tools/cmd/stringer@latest -output types_string.go -type=MapType,ProgramType,PinType
+
+// MapType indicates the type map structure
+// that will be initialized in the kernel.
+type MapType uint32
+
+// All the various map types that can be created
+const (
+ UnspecifiedMap MapType = iota
+ // Hash is a hash map
+ Hash
+ // Array is an array map
+ Array
+ // ProgramArray - A program array map is a special kind of array map whose map
+ // values contain only file descriptors referring to other eBPF
+ // programs. Thus, both the key_size and value_size must be
+ // exactly four bytes. This map is used in conjunction with the
+ // TailCall helper.
+ ProgramArray
+ // PerfEventArray - A perf event array is used in conjunction with PerfEventRead
+ // and PerfEventOutput calls, to read the raw bpf_perf_data from the registers.
+ PerfEventArray
+ // PerCPUHash - This data structure is useful for people who have high performance
+ // network needs and can reconcile adds at the end of some cycle, so that
+ // hashes can be lock free without the use of XAdd, which can be costly.
+ PerCPUHash
+ // PerCPUArray - This data structure is useful for people who have high performance
+ // network needs and can reconcile adds at the end of some cycle, so that
+ // hashes can be lock free without the use of XAdd, which can be costly.
+ // Each CPU gets a copy of this hash, the contents of all of which can be reconciled
+ // later.
+ PerCPUArray
+ // StackTrace - This holds whole user and kernel stack traces, it can be retrieved with
+ // GetStackID
+ StackTrace
+ // CGroupArray - This is a very niche structure used to help SKBInCGroup determine
+ // if an skb is from a socket belonging to a specific cgroup
+ CGroupArray
+ // LRUHash - This allows you to create a small hash structure that will purge the
+ // least recently used items rather than throw an error when you run out of memory
+ LRUHash
+ // LRUCPUHash - This is NOT like PerCPUHash, this structure is shared among the CPUs,
+ // it has more to do with including the CPU id with the LRU calculation so that if a
+ // particular CPU is using a value over-and-over again, then it will be saved, but if
+ // a value is being retrieved a lot but sparsely across CPUs it is not as important, basically
+ // giving weight to CPU locality over overall usage.
+ LRUCPUHash
+ // LPMTrie - This is an implementation of Longest-Prefix-Match Trie structure. It is useful,
+ // for storing things like IP addresses which can be bit masked allowing for keys of differing
+ // values to refer to the same reference based on their masks. See wikipedia for more details.
+ LPMTrie
+ // ArrayOfMaps - Each item in the array is another map. The inner map mustn't be a map of maps
+ // itself.
+ ArrayOfMaps
+ // HashOfMaps - Each item in the hash map is another map. The inner map mustn't be a map of maps
+ // itself.
+ HashOfMaps
+ // DevMap - Specialized map to store references to network devices.
+ DevMap
+ // SockMap - Specialized map to store references to sockets.
+ SockMap
+ // CPUMap - Specialized map to store references to CPUs.
+ CPUMap
+ // XSKMap - Specialized map for XDP programs to store references to open sockets.
+ XSKMap
+ // SockHash - Specialized hash to store references to sockets.
+ SockHash
+ // CGroupStorage - Special map for CGroups.
+ CGroupStorage
+ // ReusePortSockArray - Specialized map to store references to sockets that can be reused.
+ ReusePortSockArray
+ // PerCPUCGroupStorage - Special per CPU map for CGroups.
+ PerCPUCGroupStorage
+ // Queue - FIFO storage for BPF programs.
+ Queue
+ // Stack - LIFO storage for BPF programs.
+ Stack
+ // SkStorage - Specialized map for local storage at SK for BPF programs.
+ SkStorage
+ // DevMapHash - Hash-based indexing scheme for references to network devices.
+ DevMapHash
+ // StructOpsMap - This map holds a kernel struct with its function pointer implemented in a BPF
+ // program.
+ StructOpsMap
+ // RingBuf - Similar to PerfEventArray, but shared across all CPUs.
+ RingBuf
+ // InodeStorage - Specialized local storage map for inodes.
+ InodeStorage
+ // TaskStorage - Specialized local storage map for task_struct.
+ TaskStorage
+)
+
+// hasPerCPUValue returns true if the Map stores a value per CPU.
+func (mt MapType) hasPerCPUValue() bool {
+ return mt == PerCPUHash || mt == PerCPUArray || mt == LRUCPUHash || mt == PerCPUCGroupStorage
+}
+
+// canStoreMapOrProgram returns true if the Map stores references to another Map
+// or Program.
+func (mt MapType) canStoreMapOrProgram() bool {
+ return mt.canStoreMap() || mt.canStoreProgram()
+}
+
+// canStoreMap returns true if the map type accepts a map fd
+// for update and returns a map id for lookup.
+func (mt MapType) canStoreMap() bool {
+ return mt == ArrayOfMaps || mt == HashOfMaps
+}
+
+// canStoreProgram returns true if the map type accepts a program fd
+// for update and returns a program id for lookup.
+func (mt MapType) canStoreProgram() bool {
+ return mt == ProgramArray
+}
+
+// ProgramType of the eBPF program
+type ProgramType uint32
+
+// eBPF program types
+const (
+ UnspecifiedProgram ProgramType = iota
+ SocketFilter
+ Kprobe
+ SchedCLS
+ SchedACT
+ TracePoint
+ XDP
+ PerfEvent
+ CGroupSKB
+ CGroupSock
+ LWTIn
+ LWTOut
+ LWTXmit
+ SockOps
+ SkSKB
+ CGroupDevice
+ SkMsg
+ RawTracepoint
+ CGroupSockAddr
+ LWTSeg6Local
+ LircMode2
+ SkReuseport
+ FlowDissector
+ CGroupSysctl
+ RawTracepointWritable
+ CGroupSockopt
+ Tracing
+ StructOps
+ Extension
+ LSM
+ SkLookup
+ Syscall
+)
+
+// AttachType of the eBPF program, needed to differentiate allowed context accesses in
+// some newer program types like CGroupSockAddr. Should be set to AttachNone if not required.
+// Will cause invalid argument (EINVAL) at program load time if set incorrectly.
+type AttachType uint32
+
+//go:generate go run golang.org/x/tools/cmd/stringer@latest -type AttachType -trimprefix Attach
+
+// AttachNone is an alias for AttachCGroupInetIngress for readability reasons.
+const AttachNone AttachType = 0
+
+const (
+ AttachCGroupInetIngress AttachType = iota
+ AttachCGroupInetEgress
+ AttachCGroupInetSockCreate
+ AttachCGroupSockOps
+ AttachSkSKBStreamParser
+ AttachSkSKBStreamVerdict
+ AttachCGroupDevice
+ AttachSkMsgVerdict
+ AttachCGroupInet4Bind
+ AttachCGroupInet6Bind
+ AttachCGroupInet4Connect
+ AttachCGroupInet6Connect
+ AttachCGroupInet4PostBind
+ AttachCGroupInet6PostBind
+ AttachCGroupUDP4Sendmsg
+ AttachCGroupUDP6Sendmsg
+ AttachLircMode2
+ AttachFlowDissector
+ AttachCGroupSysctl
+ AttachCGroupUDP4Recvmsg
+ AttachCGroupUDP6Recvmsg
+ AttachCGroupGetsockopt
+ AttachCGroupSetsockopt
+ AttachTraceRawTp
+ AttachTraceFEntry
+ AttachTraceFExit
+ AttachModifyReturn
+ AttachLSMMac
+ AttachTraceIter
+ AttachCgroupInet4GetPeername
+ AttachCgroupInet6GetPeername
+ AttachCgroupInet4GetSockname
+ AttachCgroupInet6GetSockname
+ AttachXDPDevMap
+ AttachCgroupInetSockRelease
+ AttachXDPCPUMap
+ AttachSkLookup
+ AttachXDP
+ AttachSkSKBVerdict
+ AttachSkReuseportSelect
+ AttachSkReuseportSelectOrMigrate
+ AttachPerfEvent
+ AttachTraceKprobeMulti
+)
+
+// AttachFlags of the eBPF program used in BPF_PROG_ATTACH command
+type AttachFlags uint32
+
+// PinType determines whether a map is pinned into a BPFFS.
+type PinType int
+
+// Valid pin types.
+//
+// Mirrors enum libbpf_pin_type.
+const (
+ PinNone PinType = iota
+ // Pin an object by using its name as the filename.
+ PinByName
+)
+
+// LoadPinOptions control how a pinned object is loaded.
+type LoadPinOptions struct {
+ // Request a read-only or write-only object. The default is a read-write
+ // object. Only one of the flags may be set.
+ ReadOnly bool
+ WriteOnly bool
+
+ // Raw flags for the syscall. Other fields of this struct take precedence.
+ Flags uint32
+}
+
+// Marshal returns a value suitable for BPF_OBJ_GET syscall file_flags parameter.
+func (lpo *LoadPinOptions) Marshal() uint32 {
+ if lpo == nil {
+ return 0
+ }
+
+ flags := lpo.Flags
+ if lpo.ReadOnly {
+ flags |= unix.BPF_F_RDONLY
+ }
+ if lpo.WriteOnly {
+ flags |= unix.BPF_F_WRONLY
+ }
+ return flags
+}
+
+// BatchOptions batch map operations options
+//
+// Mirrors libbpf struct bpf_map_batch_opts
+// Currently BPF_F_LOCK is the only supported
+// flag (for ElemFlags).
+type BatchOptions struct {
+ ElemFlags uint64
+ Flags uint64
+}
+
+// LogLevel controls the verbosity of the kernel's eBPF program verifier.
+// These constants can be used for the ProgramOptions.LogLevel field.
+type LogLevel = sys.LogLevel
+
+const (
+ // Print verifier state at branch points.
+ LogLevelBranch = sys.BPF_LOG_LEVEL1
+
+ // Print verifier state for every instruction.
+ // Available since Linux v5.2.
+ LogLevelInstruction = sys.BPF_LOG_LEVEL2
+
+ // Print verifier errors and stats at the end of the verification process.
+ // Available since Linux v5.2.
+ LogLevelStats = sys.BPF_LOG_STATS
+)
diff --git a/vendor/github.com/cilium/ebpf/types_string.go b/vendor/github.com/cilium/ebpf/types_string.go
new file mode 100644
index 000000000..5679f2254
--- /dev/null
+++ b/vendor/github.com/cilium/ebpf/types_string.go
@@ -0,0 +1,118 @@
+// Code generated by "stringer -output types_string.go -type=MapType,ProgramType,PinType"; DO NOT EDIT.
+
+package ebpf
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[UnspecifiedMap-0]
+ _ = x[Hash-1]
+ _ = x[Array-2]
+ _ = x[ProgramArray-3]
+ _ = x[PerfEventArray-4]
+ _ = x[PerCPUHash-5]
+ _ = x[PerCPUArray-6]
+ _ = x[StackTrace-7]
+ _ = x[CGroupArray-8]
+ _ = x[LRUHash-9]
+ _ = x[LRUCPUHash-10]
+ _ = x[LPMTrie-11]
+ _ = x[ArrayOfMaps-12]
+ _ = x[HashOfMaps-13]
+ _ = x[DevMap-14]
+ _ = x[SockMap-15]
+ _ = x[CPUMap-16]
+ _ = x[XSKMap-17]
+ _ = x[SockHash-18]
+ _ = x[CGroupStorage-19]
+ _ = x[ReusePortSockArray-20]
+ _ = x[PerCPUCGroupStorage-21]
+ _ = x[Queue-22]
+ _ = x[Stack-23]
+ _ = x[SkStorage-24]
+ _ = x[DevMapHash-25]
+ _ = x[StructOpsMap-26]
+ _ = x[RingBuf-27]
+ _ = x[InodeStorage-28]
+ _ = x[TaskStorage-29]
+}
+
+const _MapType_name = "UnspecifiedMapHashArrayProgramArrayPerfEventArrayPerCPUHashPerCPUArrayStackTraceCGroupArrayLRUHashLRUCPUHashLPMTrieArrayOfMapsHashOfMapsDevMapSockMapCPUMapXSKMapSockHashCGroupStorageReusePortSockArrayPerCPUCGroupStorageQueueStackSkStorageDevMapHashStructOpsMapRingBufInodeStorageTaskStorage"
+
+var _MapType_index = [...]uint16{0, 14, 18, 23, 35, 49, 59, 70, 80, 91, 98, 108, 115, 126, 136, 142, 149, 155, 161, 169, 182, 200, 219, 224, 229, 238, 248, 260, 267, 279, 290}
+
+func (i MapType) String() string {
+ if i >= MapType(len(_MapType_index)-1) {
+ return "MapType(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _MapType_name[_MapType_index[i]:_MapType_index[i+1]]
+}
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[UnspecifiedProgram-0]
+ _ = x[SocketFilter-1]
+ _ = x[Kprobe-2]
+ _ = x[SchedCLS-3]
+ _ = x[SchedACT-4]
+ _ = x[TracePoint-5]
+ _ = x[XDP-6]
+ _ = x[PerfEvent-7]
+ _ = x[CGroupSKB-8]
+ _ = x[CGroupSock-9]
+ _ = x[LWTIn-10]
+ _ = x[LWTOut-11]
+ _ = x[LWTXmit-12]
+ _ = x[SockOps-13]
+ _ = x[SkSKB-14]
+ _ = x[CGroupDevice-15]
+ _ = x[SkMsg-16]
+ _ = x[RawTracepoint-17]
+ _ = x[CGroupSockAddr-18]
+ _ = x[LWTSeg6Local-19]
+ _ = x[LircMode2-20]
+ _ = x[SkReuseport-21]
+ _ = x[FlowDissector-22]
+ _ = x[CGroupSysctl-23]
+ _ = x[RawTracepointWritable-24]
+ _ = x[CGroupSockopt-25]
+ _ = x[Tracing-26]
+ _ = x[StructOps-27]
+ _ = x[Extension-28]
+ _ = x[LSM-29]
+ _ = x[SkLookup-30]
+ _ = x[Syscall-31]
+}
+
+const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracingStructOpsExtensionLSMSkLookupSyscall"
+
+var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265, 274, 283, 286, 294, 301}
+
+func (i ProgramType) String() string {
+ if i >= ProgramType(len(_ProgramType_index)-1) {
+ return "ProgramType(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _ProgramType_name[_ProgramType_index[i]:_ProgramType_index[i+1]]
+}
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[PinNone-0]
+ _ = x[PinByName-1]
+}
+
+const _PinType_name = "PinNonePinByName"
+
+var _PinType_index = [...]uint8{0, 7, 16}
+
+func (i PinType) String() string {
+ if i < 0 || i >= PinType(len(_PinType_index)-1) {
+ return "PinType(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _PinType_name[_PinType_index[i]:_PinType_index[i+1]]
+}
diff --git a/vendor/github.com/cilium/proxy/LICENSE b/vendor/github.com/cilium/proxy/LICENSE
new file mode 100644
index 000000000..a2e486a80
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/LICENSE
@@ -0,0 +1,202 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} Authors of Cilium
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/accesslog.go b/vendor/github.com/cilium/proxy/go/cilium/api/accesslog.go
new file mode 100644
index 000000000..89d60eec2
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/cilium/api/accesslog.go
@@ -0,0 +1,19 @@
+// Copyright 2020 Authors of Cilium
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cilium
+
+// Add an exported type alias for L7 log entry oneof, so that the Go code does
+// not need to know all the individual types
+type IsLogEntry_L7 = isLogEntry_L7
diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/accesslog.pb.go b/vendor/github.com/cilium/proxy/go/cilium/api/accesslog.pb.go
new file mode 100644
index 000000000..7d5f56f45
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/cilium/api/accesslog.pb.go
@@ -0,0 +1,849 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.23.0
+// protoc v4.23.1
+// source: cilium/api/accesslog.proto
+
+package cilium
+
+import (
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+type HttpProtocol int32
+
+const (
+ HttpProtocol_HTTP10 HttpProtocol = 0
+ HttpProtocol_HTTP11 HttpProtocol = 1
+ HttpProtocol_HTTP2 HttpProtocol = 2
+)
+
+// Enum value maps for HttpProtocol.
+var (
+ HttpProtocol_name = map[int32]string{
+ 0: "HTTP10",
+ 1: "HTTP11",
+ 2: "HTTP2",
+ }
+ HttpProtocol_value = map[string]int32{
+ "HTTP10": 0,
+ "HTTP11": 1,
+ "HTTP2": 2,
+ }
+)
+
+func (x HttpProtocol) Enum() *HttpProtocol {
+ p := new(HttpProtocol)
+ *p = x
+ return p
+}
+
+func (x HttpProtocol) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (HttpProtocol) Descriptor() protoreflect.EnumDescriptor {
+ return file_cilium_api_accesslog_proto_enumTypes[0].Descriptor()
+}
+
+func (HttpProtocol) Type() protoreflect.EnumType {
+ return &file_cilium_api_accesslog_proto_enumTypes[0]
+}
+
+func (x HttpProtocol) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use HttpProtocol.Descriptor instead.
+func (HttpProtocol) EnumDescriptor() ([]byte, []int) {
+ return file_cilium_api_accesslog_proto_rawDescGZIP(), []int{0}
+}
+
+type EntryType int32
+
+const (
+ EntryType_Request EntryType = 0
+ EntryType_Response EntryType = 1
+ EntryType_Denied EntryType = 2
+)
+
+// Enum value maps for EntryType.
+var (
+ EntryType_name = map[int32]string{
+ 0: "Request",
+ 1: "Response",
+ 2: "Denied",
+ }
+ EntryType_value = map[string]int32{
+ "Request": 0,
+ "Response": 1,
+ "Denied": 2,
+ }
+)
+
+func (x EntryType) Enum() *EntryType {
+ p := new(EntryType)
+ *p = x
+ return p
+}
+
+func (x EntryType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (EntryType) Descriptor() protoreflect.EnumDescriptor {
+ return file_cilium_api_accesslog_proto_enumTypes[1].Descriptor()
+}
+
+func (EntryType) Type() protoreflect.EnumType {
+ return &file_cilium_api_accesslog_proto_enumTypes[1]
+}
+
+func (x EntryType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use EntryType.Descriptor instead.
+func (EntryType) EnumDescriptor() ([]byte, []int) {
+ return file_cilium_api_accesslog_proto_rawDescGZIP(), []int{1}
+}
+
+type KeyValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *KeyValue) Reset() {
+ *x = KeyValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cilium_api_accesslog_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *KeyValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*KeyValue) ProtoMessage() {}
+
+func (x *KeyValue) ProtoReflect() protoreflect.Message {
+ mi := &file_cilium_api_accesslog_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use KeyValue.ProtoReflect.Descriptor instead.
+func (*KeyValue) Descriptor() ([]byte, []int) {
+ return file_cilium_api_accesslog_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *KeyValue) GetKey() string {
+ if x != nil {
+ return x.Key
+ }
+ return ""
+}
+
+func (x *KeyValue) GetValue() string {
+ if x != nil {
+ return x.Value
+ }
+ return ""
+}
+
+type HttpLogEntry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ HttpProtocol HttpProtocol `protobuf:"varint,1,opt,name=http_protocol,json=httpProtocol,proto3,enum=cilium.HttpProtocol" json:"http_protocol,omitempty"`
+ // Request info that is also retained for the response
+ Scheme string `protobuf:"bytes,2,opt,name=scheme,proto3" json:"scheme,omitempty"` // Envoy "x-forwarded-proto", e.g., "http", "https"
+ Host string `protobuf:"bytes,3,opt,name=host,proto3" json:"host,omitempty"` // Envoy ":authority" header
+ Path string `protobuf:"bytes,4,opt,name=path,proto3" json:"path,omitempty"` // Envoy ":path" header
+ Method string `protobuf:"bytes,5,opt,name=method,proto3" json:"method,omitempty"` // Envoy ":method" header
+ // Request or response headers not included above
+ Headers []*KeyValue `protobuf:"bytes,6,rep,name=headers,proto3" json:"headers,omitempty"`
+ // Response info
+ Status uint32 `protobuf:"varint,7,opt,name=status,proto3" json:"status,omitempty"` // Envoy ":status" header, zero for request
+ // missing_headers includes both headers that were added to the
+ // request, and headers that were merely logged as missing
+ MissingHeaders []*KeyValue `protobuf:"bytes,8,rep,name=missing_headers,json=missingHeaders,proto3" json:"missing_headers,omitempty"`
+ // rejected_headers includes headers that were flagged as unallowed,
+ // which may have been removed, or merely logged and the request still
+ // allowed, or the request may have been dropped due to them.
+ RejectedHeaders []*KeyValue `protobuf:"bytes,9,rep,name=rejected_headers,json=rejectedHeaders,proto3" json:"rejected_headers,omitempty"`
+}
+
+func (x *HttpLogEntry) Reset() {
+ *x = HttpLogEntry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cilium_api_accesslog_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HttpLogEntry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HttpLogEntry) ProtoMessage() {}
+
+func (x *HttpLogEntry) ProtoReflect() protoreflect.Message {
+ mi := &file_cilium_api_accesslog_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HttpLogEntry.ProtoReflect.Descriptor instead.
+func (*HttpLogEntry) Descriptor() ([]byte, []int) {
+ return file_cilium_api_accesslog_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *HttpLogEntry) GetHttpProtocol() HttpProtocol {
+ if x != nil {
+ return x.HttpProtocol
+ }
+ return HttpProtocol_HTTP10
+}
+
+func (x *HttpLogEntry) GetScheme() string {
+ if x != nil {
+ return x.Scheme
+ }
+ return ""
+}
+
+func (x *HttpLogEntry) GetHost() string {
+ if x != nil {
+ return x.Host
+ }
+ return ""
+}
+
+func (x *HttpLogEntry) GetPath() string {
+ if x != nil {
+ return x.Path
+ }
+ return ""
+}
+
+func (x *HttpLogEntry) GetMethod() string {
+ if x != nil {
+ return x.Method
+ }
+ return ""
+}
+
+func (x *HttpLogEntry) GetHeaders() []*KeyValue {
+ if x != nil {
+ return x.Headers
+ }
+ return nil
+}
+
+func (x *HttpLogEntry) GetStatus() uint32 {
+ if x != nil {
+ return x.Status
+ }
+ return 0
+}
+
+func (x *HttpLogEntry) GetMissingHeaders() []*KeyValue {
+ if x != nil {
+ return x.MissingHeaders
+ }
+ return nil
+}
+
+func (x *HttpLogEntry) GetRejectedHeaders() []*KeyValue {
+ if x != nil {
+ return x.RejectedHeaders
+ }
+ return nil
+}
+
+type KafkaLogEntry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // correlation_id is a user-supplied integer value that will be passed
+ // back with the response
+ CorrelationId int32 `protobuf:"varint,1,opt,name=correlation_id,json=correlationId,proto3" json:"correlation_id,omitempty"`
+ // error_code is the Kafka error code being returned
+ // Ref. https://kafka.apache.org/protocol#protocol_error_codes
+ ErrorCode int32 `protobuf:"varint,2,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"`
+ // api_version of the Kafka api used
+ // Ref. https://kafka.apache.org/protocol#protocol_compatibility
+ ApiVersion int32 `protobuf:"varint,3,opt,name=api_version,json=apiVersion,proto3" json:"api_version,omitempty"`
+ // api_key for Kafka message
+ // Reference: https://kafka.apache.org/protocol#protocol_api_keys
+ ApiKey int32 `protobuf:"varint,4,opt,name=api_key,json=apiKey,proto3" json:"api_key,omitempty"`
+ // Topics of the request
+ // Optional, as not all messages have topics (ex. LeaveGroup, Heartbeat)
+ Topics []string `protobuf:"bytes,5,rep,name=topics,proto3" json:"topics,omitempty"`
+}
+
+func (x *KafkaLogEntry) Reset() {
+ *x = KafkaLogEntry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cilium_api_accesslog_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *KafkaLogEntry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*KafkaLogEntry) ProtoMessage() {}
+
+func (x *KafkaLogEntry) ProtoReflect() protoreflect.Message {
+ mi := &file_cilium_api_accesslog_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use KafkaLogEntry.ProtoReflect.Descriptor instead.
+func (*KafkaLogEntry) Descriptor() ([]byte, []int) {
+ return file_cilium_api_accesslog_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *KafkaLogEntry) GetCorrelationId() int32 {
+ if x != nil {
+ return x.CorrelationId
+ }
+ return 0
+}
+
+func (x *KafkaLogEntry) GetErrorCode() int32 {
+ if x != nil {
+ return x.ErrorCode
+ }
+ return 0
+}
+
+func (x *KafkaLogEntry) GetApiVersion() int32 {
+ if x != nil {
+ return x.ApiVersion
+ }
+ return 0
+}
+
+func (x *KafkaLogEntry) GetApiKey() int32 {
+ if x != nil {
+ return x.ApiKey
+ }
+ return 0
+}
+
+func (x *KafkaLogEntry) GetTopics() []string {
+ if x != nil {
+ return x.Topics
+ }
+ return nil
+}
+
+type L7LogEntry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Proto string `protobuf:"bytes,1,opt,name=proto,proto3" json:"proto,omitempty"`
+ Fields map[string]string `protobuf:"bytes,2,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *L7LogEntry) Reset() {
+ *x = L7LogEntry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cilium_api_accesslog_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *L7LogEntry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*L7LogEntry) ProtoMessage() {}
+
+func (x *L7LogEntry) ProtoReflect() protoreflect.Message {
+ mi := &file_cilium_api_accesslog_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use L7LogEntry.ProtoReflect.Descriptor instead.
+func (*L7LogEntry) Descriptor() ([]byte, []int) {
+ return file_cilium_api_accesslog_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *L7LogEntry) GetProto() string {
+ if x != nil {
+ return x.Proto
+ }
+ return ""
+}
+
+func (x *L7LogEntry) GetFields() map[string]string {
+ if x != nil {
+ return x.Fields
+ }
+ return nil
+}
+
+type LogEntry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The time that Cilium filter captured this log entry,
+ // in, nanoseconds since 1/1/1970.
+ Timestamp uint64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+ // 'true' if the request was received by an ingress listener,
+ // 'false' if received by an egress listener
+ IsIngress bool `protobuf:"varint,15,opt,name=is_ingress,json=isIngress,proto3" json:"is_ingress,omitempty"`
+ EntryType EntryType `protobuf:"varint,3,opt,name=entry_type,json=entryType,proto3,enum=cilium.EntryType" json:"entry_type,omitempty"`
+ // Cilium network policy resource name
+ PolicyName string `protobuf:"bytes,4,opt,name=policy_name,json=policyName,proto3" json:"policy_name,omitempty"`
+ // Cilium rule reference
+ CiliumRuleRef string `protobuf:"bytes,5,opt,name=cilium_rule_ref,json=ciliumRuleRef,proto3" json:"cilium_rule_ref,omitempty"`
+ // Cilium security ID of the source and destination
+ SourceSecurityId uint32 `protobuf:"varint,6,opt,name=source_security_id,json=sourceSecurityId,proto3" json:"source_security_id,omitempty"`
+ DestinationSecurityId uint32 `protobuf:"varint,16,opt,name=destination_security_id,json=destinationSecurityId,proto3" json:"destination_security_id,omitempty"`
+ // These fields record the original source and destination addresses,
+ // stored in ipv4:port or [ipv6]:port format.
+ SourceAddress string `protobuf:"bytes,7,opt,name=source_address,json=sourceAddress,proto3" json:"source_address,omitempty"`
+ DestinationAddress string `protobuf:"bytes,8,opt,name=destination_address,json=destinationAddress,proto3" json:"destination_address,omitempty"`
+ // Types that are assignable to L7:
+ //
+ // *LogEntry_Http
+ // *LogEntry_Kafka
+ // *LogEntry_GenericL7
+ L7 isLogEntry_L7 `protobuf_oneof:"l7"`
+}
+
+func (x *LogEntry) Reset() {
+ *x = LogEntry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cilium_api_accesslog_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LogEntry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LogEntry) ProtoMessage() {}
+
+func (x *LogEntry) ProtoReflect() protoreflect.Message {
+ mi := &file_cilium_api_accesslog_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use LogEntry.ProtoReflect.Descriptor instead.
+func (*LogEntry) Descriptor() ([]byte, []int) {
+ return file_cilium_api_accesslog_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *LogEntry) GetTimestamp() uint64 {
+ if x != nil {
+ return x.Timestamp
+ }
+ return 0
+}
+
+func (x *LogEntry) GetIsIngress() bool {
+ if x != nil {
+ return x.IsIngress
+ }
+ return false
+}
+
+func (x *LogEntry) GetEntryType() EntryType {
+ if x != nil {
+ return x.EntryType
+ }
+ return EntryType_Request
+}
+
+func (x *LogEntry) GetPolicyName() string {
+ if x != nil {
+ return x.PolicyName
+ }
+ return ""
+}
+
+func (x *LogEntry) GetCiliumRuleRef() string {
+ if x != nil {
+ return x.CiliumRuleRef
+ }
+ return ""
+}
+
+func (x *LogEntry) GetSourceSecurityId() uint32 {
+ if x != nil {
+ return x.SourceSecurityId
+ }
+ return 0
+}
+
+func (x *LogEntry) GetDestinationSecurityId() uint32 {
+ if x != nil {
+ return x.DestinationSecurityId
+ }
+ return 0
+}
+
+func (x *LogEntry) GetSourceAddress() string {
+ if x != nil {
+ return x.SourceAddress
+ }
+ return ""
+}
+
+func (x *LogEntry) GetDestinationAddress() string {
+ if x != nil {
+ return x.DestinationAddress
+ }
+ return ""
+}
+
+func (m *LogEntry) GetL7() isLogEntry_L7 {
+ if m != nil {
+ return m.L7
+ }
+ return nil
+}
+
+func (x *LogEntry) GetHttp() *HttpLogEntry {
+ if x, ok := x.GetL7().(*LogEntry_Http); ok {
+ return x.Http
+ }
+ return nil
+}
+
+func (x *LogEntry) GetKafka() *KafkaLogEntry {
+ if x, ok := x.GetL7().(*LogEntry_Kafka); ok {
+ return x.Kafka
+ }
+ return nil
+}
+
+func (x *LogEntry) GetGenericL7() *L7LogEntry {
+ if x, ok := x.GetL7().(*LogEntry_GenericL7); ok {
+ return x.GenericL7
+ }
+ return nil
+}
+
+type isLogEntry_L7 interface {
+ isLogEntry_L7()
+}
+
+type LogEntry_Http struct {
+ Http *HttpLogEntry `protobuf:"bytes,100,opt,name=http,proto3,oneof"`
+}
+
+type LogEntry_Kafka struct {
+ Kafka *KafkaLogEntry `protobuf:"bytes,101,opt,name=kafka,proto3,oneof"`
+}
+
+type LogEntry_GenericL7 struct {
+ GenericL7 *L7LogEntry `protobuf:"bytes,102,opt,name=generic_l7,json=genericL7,proto3,oneof"`
+}
+
+func (*LogEntry_Http) isLogEntry_L7() {}
+
+func (*LogEntry_Kafka) isLogEntry_L7() {}
+
+func (*LogEntry_GenericL7) isLogEntry_L7() {}
+
+var File_cilium_api_accesslog_proto protoreflect.FileDescriptor
+
+var file_cilium_api_accesslog_proto_rawDesc = []byte{
+ 0x0a, 0x1a, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x63, 0x63,
+ 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x63, 0x69,
+ 0x6c, 0x69, 0x75, 0x6d, 0x22, 0x32, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b,
+ 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xdd, 0x02, 0x0a, 0x0c, 0x48, 0x74, 0x74,
+ 0x70, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x39, 0x0a, 0x0d, 0x68, 0x74, 0x74,
+ 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e,
+ 0x32, 0x14, 0x2e, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x0c, 0x68, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04,
+ 0x68, 0x6f, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74,
+ 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+ 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x2a, 0x0a, 0x07,
+ 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e,
+ 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52,
+ 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x12, 0x39, 0x0a, 0x0f, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x68, 0x65, 0x61, 0x64,
+ 0x65, 0x72, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x69, 0x6c, 0x69,
+ 0x75, 0x6d, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x6d, 0x69, 0x73,
+ 0x73, 0x69, 0x6e, 0x67, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x3b, 0x0a, 0x10, 0x72,
+ 0x65, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18,
+ 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x4b,
+ 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x65,
+ 0x64, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x0d, 0x4b, 0x61, 0x66,
+ 0x6b, 0x61, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6f,
+ 0x72, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x05, 0x52, 0x0d, 0x63, 0x6f, 0x72, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49,
+ 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65,
+ 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x61, 0x70, 0x69, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x05, 0x52, 0x06, 0x61, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x6f,
+ 0x70, 0x69, 0x63, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x74, 0x6f, 0x70, 0x69,
+ 0x63, 0x73, 0x22, 0x95, 0x01, 0x0a, 0x0a, 0x4c, 0x37, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x05, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x36, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64,
+ 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d,
+ 0x2e, 0x4c, 0x37, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c,
+ 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a,
+ 0x39, 0x0a, 0x0b, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
+ 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79,
+ 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x96, 0x04, 0x0a, 0x08, 0x4c,
+ 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73,
+ 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x67, 0x72,
+ 0x65, 0x73, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x49, 0x6e, 0x67,
+ 0x72, 0x65, 0x73, 0x73, 0x12, 0x30, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x5f, 0x74, 0x79,
+ 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x11, 0x2e, 0x63, 0x69, 0x6c, 0x69, 0x75,
+ 0x6d, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x65, 0x6e, 0x74,
+ 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79,
+ 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x6f, 0x6c,
+ 0x69, 0x63, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x63, 0x69, 0x6c, 0x69, 0x75,
+ 0x6d, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0d, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x66, 0x12,
+ 0x2c, 0x0a, 0x12, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69,
+ 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x10, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x36, 0x0a,
+ 0x17, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x63,
+ 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x15,
+ 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x75, 0x72,
+ 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f,
+ 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x2f, 0x0a, 0x13,
+ 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x64, 0x64, 0x72,
+ 0x65, 0x73, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69,
+ 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x2a, 0x0a,
+ 0x04, 0x68, 0x74, 0x74, 0x70, 0x18, 0x64, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x69,
+ 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x48, 0x00, 0x52, 0x04, 0x68, 0x74, 0x74, 0x70, 0x12, 0x2d, 0x0a, 0x05, 0x6b, 0x61, 0x66,
+ 0x6b, 0x61, 0x18, 0x65, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x69, 0x6c, 0x69, 0x75,
+ 0x6d, 0x2e, 0x4b, 0x61, 0x66, 0x6b, 0x61, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x48,
+ 0x00, 0x52, 0x05, 0x6b, 0x61, 0x66, 0x6b, 0x61, 0x12, 0x33, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65,
+ 0x72, 0x69, 0x63, 0x5f, 0x6c, 0x37, 0x18, 0x66, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63,
+ 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x4c, 0x37, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x48, 0x00, 0x52, 0x09, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x4c, 0x37, 0x42, 0x04, 0x0a,
+ 0x02, 0x6c, 0x37, 0x2a, 0x31, 0x0a, 0x0c, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f,
+ 0x63, 0x6f, 0x6c, 0x12, 0x0a, 0x0a, 0x06, 0x48, 0x54, 0x54, 0x50, 0x31, 0x30, 0x10, 0x00, 0x12,
+ 0x0a, 0x0a, 0x06, 0x48, 0x54, 0x54, 0x50, 0x31, 0x31, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x48,
+ 0x54, 0x54, 0x50, 0x32, 0x10, 0x02, 0x2a, 0x32, 0x0a, 0x09, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x54,
+ 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x10, 0x00,
+ 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x10, 0x01, 0x12, 0x0a,
+ 0x0a, 0x06, 0x44, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x10, 0x02, 0x42, 0x2e, 0x5a, 0x2c, 0x67, 0x69,
+ 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f,
+ 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f,
+ 0x61, 0x70, 0x69, 0x3b, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
+}
+
+var (
+ file_cilium_api_accesslog_proto_rawDescOnce sync.Once
+ file_cilium_api_accesslog_proto_rawDescData = file_cilium_api_accesslog_proto_rawDesc
+)
+
+func file_cilium_api_accesslog_proto_rawDescGZIP() []byte {
+ file_cilium_api_accesslog_proto_rawDescOnce.Do(func() {
+ file_cilium_api_accesslog_proto_rawDescData = protoimpl.X.CompressGZIP(file_cilium_api_accesslog_proto_rawDescData)
+ })
+ return file_cilium_api_accesslog_proto_rawDescData
+}
+
+var file_cilium_api_accesslog_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_cilium_api_accesslog_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
+var file_cilium_api_accesslog_proto_goTypes = []interface{}{
+ (HttpProtocol)(0), // 0: cilium.HttpProtocol
+ (EntryType)(0), // 1: cilium.EntryType
+ (*KeyValue)(nil), // 2: cilium.KeyValue
+ (*HttpLogEntry)(nil), // 3: cilium.HttpLogEntry
+ (*KafkaLogEntry)(nil), // 4: cilium.KafkaLogEntry
+ (*L7LogEntry)(nil), // 5: cilium.L7LogEntry
+ (*LogEntry)(nil), // 6: cilium.LogEntry
+ nil, // 7: cilium.L7LogEntry.FieldsEntry
+}
+var file_cilium_api_accesslog_proto_depIdxs = []int32{
+ 0, // 0: cilium.HttpLogEntry.http_protocol:type_name -> cilium.HttpProtocol
+ 2, // 1: cilium.HttpLogEntry.headers:type_name -> cilium.KeyValue
+ 2, // 2: cilium.HttpLogEntry.missing_headers:type_name -> cilium.KeyValue
+ 2, // 3: cilium.HttpLogEntry.rejected_headers:type_name -> cilium.KeyValue
+ 7, // 4: cilium.L7LogEntry.fields:type_name -> cilium.L7LogEntry.FieldsEntry
+ 1, // 5: cilium.LogEntry.entry_type:type_name -> cilium.EntryType
+ 3, // 6: cilium.LogEntry.http:type_name -> cilium.HttpLogEntry
+ 4, // 7: cilium.LogEntry.kafka:type_name -> cilium.KafkaLogEntry
+ 5, // 8: cilium.LogEntry.generic_l7:type_name -> cilium.L7LogEntry
+ 9, // [9:9] is the sub-list for method output_type
+ 9, // [9:9] is the sub-list for method input_type
+ 9, // [9:9] is the sub-list for extension type_name
+ 9, // [9:9] is the sub-list for extension extendee
+ 0, // [0:9] is the sub-list for field type_name
+}
+
+func init() { file_cilium_api_accesslog_proto_init() }
+func file_cilium_api_accesslog_proto_init() {
+ if File_cilium_api_accesslog_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_cilium_api_accesslog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*KeyValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cilium_api_accesslog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HttpLogEntry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cilium_api_accesslog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*KafkaLogEntry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cilium_api_accesslog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*L7LogEntry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cilium_api_accesslog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LogEntry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_cilium_api_accesslog_proto_msgTypes[4].OneofWrappers = []interface{}{
+ (*LogEntry_Http)(nil),
+ (*LogEntry_Kafka)(nil),
+ (*LogEntry_GenericL7)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cilium_api_accesslog_proto_rawDesc,
+ NumEnums: 2,
+ NumMessages: 6,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cilium_api_accesslog_proto_goTypes,
+ DependencyIndexes: file_cilium_api_accesslog_proto_depIdxs,
+ EnumInfos: file_cilium_api_accesslog_proto_enumTypes,
+ MessageInfos: file_cilium_api_accesslog_proto_msgTypes,
+ }.Build()
+ File_cilium_api_accesslog_proto = out.File
+ file_cilium_api_accesslog_proto_rawDesc = nil
+ file_cilium_api_accesslog_proto_goTypes = nil
+ file_cilium_api_accesslog_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/accesslog.pb.validate.go b/vendor/github.com/cilium/proxy/go/cilium/api/accesslog.pb.validate.go
new file mode 100644
index 000000000..37a311b7c
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/cilium/api/accesslog.pb.validate.go
@@ -0,0 +1,808 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: cilium/api/accesslog.proto
+
+package cilium
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on KeyValue with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *KeyValue) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on KeyValue with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in KeyValueMultiError, or nil
+// if none found.
+func (m *KeyValue) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *KeyValue) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Key
+
+ // no validation rules for Value
+
+ if len(errors) > 0 {
+ return KeyValueMultiError(errors)
+ }
+
+ return nil
+}
+
+// KeyValueMultiError is an error wrapping multiple validation errors returned
+// by KeyValue.ValidateAll() if the designated constraints aren't met.
+type KeyValueMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m KeyValueMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m KeyValueMultiError) AllErrors() []error { return m }
+
+// KeyValueValidationError is the validation error returned by
+// KeyValue.Validate if the designated constraints aren't met.
+type KeyValueValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e KeyValueValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e KeyValueValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e KeyValueValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e KeyValueValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e KeyValueValidationError) ErrorName() string { return "KeyValueValidationError" }
+
+// Error satisfies the builtin error interface
+func (e KeyValueValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sKeyValue.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = KeyValueValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = KeyValueValidationError{}
+
+// Validate checks the field values on HttpLogEntry with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *HttpLogEntry) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on HttpLogEntry with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in HttpLogEntryMultiError, or
+// nil if none found.
+func (m *HttpLogEntry) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *HttpLogEntry) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for HttpProtocol
+
+ // no validation rules for Scheme
+
+ // no validation rules for Host
+
+ // no validation rules for Path
+
+ // no validation rules for Method
+
+ for idx, item := range m.GetHeaders() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HttpLogEntryValidationError{
+ field: fmt.Sprintf("Headers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HttpLogEntryValidationError{
+ field: fmt.Sprintf("Headers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HttpLogEntryValidationError{
+ field: fmt.Sprintf("Headers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ // no validation rules for Status
+
+ for idx, item := range m.GetMissingHeaders() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HttpLogEntryValidationError{
+ field: fmt.Sprintf("MissingHeaders[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HttpLogEntryValidationError{
+ field: fmt.Sprintf("MissingHeaders[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HttpLogEntryValidationError{
+ field: fmt.Sprintf("MissingHeaders[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetRejectedHeaders() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HttpLogEntryValidationError{
+ field: fmt.Sprintf("RejectedHeaders[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HttpLogEntryValidationError{
+ field: fmt.Sprintf("RejectedHeaders[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HttpLogEntryValidationError{
+ field: fmt.Sprintf("RejectedHeaders[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return HttpLogEntryMultiError(errors)
+ }
+
+ return nil
+}
+
+// HttpLogEntryMultiError is an error wrapping multiple validation errors
+// returned by HttpLogEntry.ValidateAll() if the designated constraints aren't met.
+type HttpLogEntryMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m HttpLogEntryMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m HttpLogEntryMultiError) AllErrors() []error { return m }
+
+// HttpLogEntryValidationError is the validation error returned by
+// HttpLogEntry.Validate if the designated constraints aren't met.
+type HttpLogEntryValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HttpLogEntryValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HttpLogEntryValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HttpLogEntryValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HttpLogEntryValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HttpLogEntryValidationError) ErrorName() string { return "HttpLogEntryValidationError" }
+
+// Error satisfies the builtin error interface
+func (e HttpLogEntryValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHttpLogEntry.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HttpLogEntryValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HttpLogEntryValidationError{}
+
+// Validate checks the field values on KafkaLogEntry with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *KafkaLogEntry) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on KafkaLogEntry with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in KafkaLogEntryMultiError, or
+// nil if none found.
+func (m *KafkaLogEntry) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *KafkaLogEntry) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for CorrelationId
+
+ // no validation rules for ErrorCode
+
+ // no validation rules for ApiVersion
+
+ // no validation rules for ApiKey
+
+ if len(errors) > 0 {
+ return KafkaLogEntryMultiError(errors)
+ }
+
+ return nil
+}
+
+// KafkaLogEntryMultiError is an error wrapping multiple validation errors
+// returned by KafkaLogEntry.ValidateAll() if the designated constraints
+// aren't met.
+type KafkaLogEntryMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m KafkaLogEntryMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m KafkaLogEntryMultiError) AllErrors() []error { return m }
+
+// KafkaLogEntryValidationError is the validation error returned by
+// KafkaLogEntry.Validate if the designated constraints aren't met.
+type KafkaLogEntryValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e KafkaLogEntryValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e KafkaLogEntryValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e KafkaLogEntryValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e KafkaLogEntryValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e KafkaLogEntryValidationError) ErrorName() string { return "KafkaLogEntryValidationError" }
+
+// Error satisfies the builtin error interface
+func (e KafkaLogEntryValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sKafkaLogEntry.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = KafkaLogEntryValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = KafkaLogEntryValidationError{}
+
+// Validate checks the field values on L7LogEntry with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *L7LogEntry) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on L7LogEntry with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in L7LogEntryMultiError, or
+// nil if none found.
+func (m *L7LogEntry) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *L7LogEntry) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Proto
+
+ // no validation rules for Fields
+
+ if len(errors) > 0 {
+ return L7LogEntryMultiError(errors)
+ }
+
+ return nil
+}
+
+// L7LogEntryMultiError is an error wrapping multiple validation errors
+// returned by L7LogEntry.ValidateAll() if the designated constraints aren't met.
+type L7LogEntryMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m L7LogEntryMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m L7LogEntryMultiError) AllErrors() []error { return m }
+
+// L7LogEntryValidationError is the validation error returned by
+// L7LogEntry.Validate if the designated constraints aren't met.
+type L7LogEntryValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e L7LogEntryValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e L7LogEntryValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e L7LogEntryValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e L7LogEntryValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e L7LogEntryValidationError) ErrorName() string { return "L7LogEntryValidationError" }
+
+// Error satisfies the builtin error interface
+func (e L7LogEntryValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sL7LogEntry.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = L7LogEntryValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = L7LogEntryValidationError{}
+
+// Validate checks the field values on LogEntry with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *LogEntry) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on LogEntry with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in LogEntryMultiError, or nil
+// if none found.
+func (m *LogEntry) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *LogEntry) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Timestamp
+
+ // no validation rules for IsIngress
+
+ // no validation rules for EntryType
+
+ // no validation rules for PolicyName
+
+ // no validation rules for CiliumRuleRef
+
+ // no validation rules for SourceSecurityId
+
+ // no validation rules for DestinationSecurityId
+
+ // no validation rules for SourceAddress
+
+ // no validation rules for DestinationAddress
+
+ switch v := m.L7.(type) {
+ case *LogEntry_Http:
+ if v == nil {
+ err := LogEntryValidationError{
+ field: "L7",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetHttp()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, LogEntryValidationError{
+ field: "Http",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, LogEntryValidationError{
+ field: "Http",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetHttp()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return LogEntryValidationError{
+ field: "Http",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *LogEntry_Kafka:
+ if v == nil {
+ err := LogEntryValidationError{
+ field: "L7",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetKafka()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, LogEntryValidationError{
+ field: "Kafka",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, LogEntryValidationError{
+ field: "Kafka",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetKafka()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return LogEntryValidationError{
+ field: "Kafka",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *LogEntry_GenericL7:
+ if v == nil {
+ err := LogEntryValidationError{
+ field: "L7",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetGenericL7()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, LogEntryValidationError{
+ field: "GenericL7",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, LogEntryValidationError{
+ field: "GenericL7",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetGenericL7()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return LogEntryValidationError{
+ field: "GenericL7",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+
+ if len(errors) > 0 {
+ return LogEntryMultiError(errors)
+ }
+
+ return nil
+}
+
+// LogEntryMultiError is an error wrapping multiple validation errors returned
+// by LogEntry.ValidateAll() if the designated constraints aren't met.
+type LogEntryMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m LogEntryMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m LogEntryMultiError) AllErrors() []error { return m }
+
+// LogEntryValidationError is the validation error returned by
+// LogEntry.Validate if the designated constraints aren't met.
+type LogEntryValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e LogEntryValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e LogEntryValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e LogEntryValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e LogEntryValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e LogEntryValidationError) ErrorName() string { return "LogEntryValidationError" }
+
+// Error satisfies the builtin error interface
+func (e LogEntryValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sLogEntry.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = LogEntryValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = LogEntryValidationError{}
diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/bpf_metadata.pb.go b/vendor/github.com/cilium/proxy/go/cilium/api/bpf_metadata.pb.go
new file mode 100644
index 000000000..99a10f751
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/cilium/api/bpf_metadata.pb.go
@@ -0,0 +1,238 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.23.0
+// protoc v4.23.1
+// source: cilium/api/bpf_metadata.proto
+
+package cilium
+
+import (
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+type BpfMetadata struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // File system root for bpf. Defaults to "/sys/fs/bpf" if left empty.
+ BpfRoot string `protobuf:"bytes,1,opt,name=bpf_root,json=bpfRoot,proto3" json:"bpf_root,omitempty"`
+ // 'true' if the filter is on ingress listener, 'false' for egress listener.
+ IsIngress bool `protobuf:"varint,2,opt,name=is_ingress,json=isIngress,proto3" json:"is_ingress,omitempty"`
+ // Use of the original source address requires kernel datapath support which
+ // may or may not be available. 'true' if original source address
+ // should be used. Original source address use may still be
+ // skipped in scenarios where it is knows to not work.
+ UseOriginalSourceAddress bool `protobuf:"varint,3,opt,name=use_original_source_address,json=useOriginalSourceAddress,proto3" json:"use_original_source_address,omitempty"`
+ // True if the listener is used for an L7 LB. In this case policy enforcement is done on the
+ // destination selected by the listener rather than on the original destination address. For
+ // local sources the source endpoint ID is set in socket mark instead of source security ID if
+ // 'use_original_source_address' is also true, so that the local source's egress policy is
+ // enforced on the bpf datapath.
+ // Only valid for egress.
+ IsL7Lb bool `protobuf:"varint,4,opt,name=is_l7lb,json=isL7lb,proto3" json:"is_l7lb,omitempty"`
+ // Source address to be used whenever the original source address is not used.
+ // Either ipv4_source_address or ipv6_source_address depending on the address
+ // family of the destination address. If left empty, and no Envoy Cluster Bind
+ // Config is provided, the source address will be picked by the local IP stack.
+ Ipv4SourceAddress string `protobuf:"bytes,5,opt,name=ipv4_source_address,json=ipv4SourceAddress,proto3" json:"ipv4_source_address,omitempty"`
+ Ipv6SourceAddress string `protobuf:"bytes,6,opt,name=ipv6_source_address,json=ipv6SourceAddress,proto3" json:"ipv6_source_address,omitempty"`
+ // True if policy should be enforced on l7 LB used. The policy bound to the configured
+ // ipv[46]_source_addresses, which must be explicitly set, applies. Ingress policy is
+ // enforced on the security identity of the original (e.g., external) source. Egress
+ // policy is enforced on the security identity of the backend selected by the load balancer.
+ //
+ // Deprecation note: This option will be forced 'true' and deprecated when Cilium 1.15 is
+ // the oldest supported release.
+ EnforcePolicyOnL7Lb bool `protobuf:"varint,7,opt,name=enforce_policy_on_l7lb,json=enforcePolicyOnL7lb,proto3" json:"enforce_policy_on_l7lb,omitempty"`
+}
+
+func (x *BpfMetadata) Reset() {
+ *x = BpfMetadata{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cilium_api_bpf_metadata_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *BpfMetadata) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BpfMetadata) ProtoMessage() {}
+
+func (x *BpfMetadata) ProtoReflect() protoreflect.Message {
+ mi := &file_cilium_api_bpf_metadata_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BpfMetadata.ProtoReflect.Descriptor instead.
+func (*BpfMetadata) Descriptor() ([]byte, []int) {
+ return file_cilium_api_bpf_metadata_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *BpfMetadata) GetBpfRoot() string {
+ if x != nil {
+ return x.BpfRoot
+ }
+ return ""
+}
+
+func (x *BpfMetadata) GetIsIngress() bool {
+ if x != nil {
+ return x.IsIngress
+ }
+ return false
+}
+
+func (x *BpfMetadata) GetUseOriginalSourceAddress() bool {
+ if x != nil {
+ return x.UseOriginalSourceAddress
+ }
+ return false
+}
+
+func (x *BpfMetadata) GetIsL7Lb() bool {
+ if x != nil {
+ return x.IsL7Lb
+ }
+ return false
+}
+
+func (x *BpfMetadata) GetIpv4SourceAddress() string {
+ if x != nil {
+ return x.Ipv4SourceAddress
+ }
+ return ""
+}
+
+func (x *BpfMetadata) GetIpv6SourceAddress() string {
+ if x != nil {
+ return x.Ipv6SourceAddress
+ }
+ return ""
+}
+
+func (x *BpfMetadata) GetEnforcePolicyOnL7Lb() bool {
+ if x != nil {
+ return x.EnforcePolicyOnL7Lb
+ }
+ return false
+}
+
+var File_cilium_api_bpf_metadata_proto protoreflect.FileDescriptor
+
+var file_cilium_api_bpf_metadata_proto_rawDesc = []byte{
+ 0x0a, 0x1d, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x62, 0x70, 0x66,
+ 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
+ 0x06, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x22, 0xb4, 0x02, 0x0a, 0x0b, 0x42, 0x70, 0x66, 0x4d,
+ 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x19, 0x0a, 0x08, 0x62, 0x70, 0x66, 0x5f, 0x72,
+ 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x62, 0x70, 0x66, 0x52, 0x6f,
+ 0x6f, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x49, 0x6e, 0x67, 0x72, 0x65, 0x73,
+ 0x73, 0x12, 0x3d, 0x0a, 0x1b, 0x75, 0x73, 0x65, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61,
+ 0x6c, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x75, 0x73, 0x65, 0x4f, 0x72, 0x69, 0x67, 0x69,
+ 0x6e, 0x61, 0x6c, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73,
+ 0x12, 0x17, 0x0a, 0x07, 0x69, 0x73, 0x5f, 0x6c, 0x37, 0x6c, 0x62, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x06, 0x69, 0x73, 0x4c, 0x37, 0x6c, 0x62, 0x12, 0x2e, 0x0a, 0x13, 0x69, 0x70, 0x76,
+ 0x34, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x69, 0x70, 0x76, 0x34, 0x53, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x69, 0x70, 0x76,
+ 0x36, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73,
+ 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x69, 0x70, 0x76, 0x36, 0x53, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x33, 0x0a, 0x16, 0x65, 0x6e, 0x66,
+ 0x6f, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x6f, 0x6e, 0x5f, 0x6c,
+ 0x37, 0x6c, 0x62, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x65, 0x6e, 0x66, 0x6f, 0x72,
+ 0x63, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4f, 0x6e, 0x4c, 0x37, 0x6c, 0x62, 0x42, 0x2e,
+ 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x69, 0x6c,
+ 0x69, 0x75, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2f, 0x63, 0x69, 0x6c,
+ 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x3b, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x62, 0x06,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_cilium_api_bpf_metadata_proto_rawDescOnce sync.Once
+ file_cilium_api_bpf_metadata_proto_rawDescData = file_cilium_api_bpf_metadata_proto_rawDesc
+)
+
+func file_cilium_api_bpf_metadata_proto_rawDescGZIP() []byte {
+ file_cilium_api_bpf_metadata_proto_rawDescOnce.Do(func() {
+ file_cilium_api_bpf_metadata_proto_rawDescData = protoimpl.X.CompressGZIP(file_cilium_api_bpf_metadata_proto_rawDescData)
+ })
+ return file_cilium_api_bpf_metadata_proto_rawDescData
+}
+
+var file_cilium_api_bpf_metadata_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_cilium_api_bpf_metadata_proto_goTypes = []interface{}{
+ (*BpfMetadata)(nil), // 0: cilium.BpfMetadata
+}
+var file_cilium_api_bpf_metadata_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_cilium_api_bpf_metadata_proto_init() }
+func file_cilium_api_bpf_metadata_proto_init() {
+ if File_cilium_api_bpf_metadata_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_cilium_api_bpf_metadata_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BpfMetadata); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cilium_api_bpf_metadata_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cilium_api_bpf_metadata_proto_goTypes,
+ DependencyIndexes: file_cilium_api_bpf_metadata_proto_depIdxs,
+ MessageInfos: file_cilium_api_bpf_metadata_proto_msgTypes,
+ }.Build()
+ File_cilium_api_bpf_metadata_proto = out.File
+ file_cilium_api_bpf_metadata_proto_rawDesc = nil
+ file_cilium_api_bpf_metadata_proto_goTypes = nil
+ file_cilium_api_bpf_metadata_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/bpf_metadata.pb.validate.go b/vendor/github.com/cilium/proxy/go/cilium/api/bpf_metadata.pb.validate.go
new file mode 100644
index 000000000..18dbb0aed
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/cilium/api/bpf_metadata.pb.validate.go
@@ -0,0 +1,149 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: cilium/api/bpf_metadata.proto
+
+package cilium
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on BpfMetadata with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *BpfMetadata) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on BpfMetadata with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in BpfMetadataMultiError, or
+// nil if none found.
+func (m *BpfMetadata) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *BpfMetadata) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for BpfRoot
+
+ // no validation rules for IsIngress
+
+ // no validation rules for UseOriginalSourceAddress
+
+ // no validation rules for IsL7Lb
+
+ // no validation rules for Ipv4SourceAddress
+
+ // no validation rules for Ipv6SourceAddress
+
+ // no validation rules for EnforcePolicyOnL7Lb
+
+ if len(errors) > 0 {
+ return BpfMetadataMultiError(errors)
+ }
+
+ return nil
+}
+
+// BpfMetadataMultiError is an error wrapping multiple validation errors
+// returned by BpfMetadata.ValidateAll() if the designated constraints aren't met.
+type BpfMetadataMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m BpfMetadataMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m BpfMetadataMultiError) AllErrors() []error { return m }
+
+// BpfMetadataValidationError is the validation error returned by
+// BpfMetadata.Validate if the designated constraints aren't met.
+type BpfMetadataValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e BpfMetadataValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e BpfMetadataValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e BpfMetadataValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e BpfMetadataValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e BpfMetadataValidationError) ErrorName() string { return "BpfMetadataValidationError" }
+
+// Error satisfies the builtin error interface
+func (e BpfMetadataValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sBpfMetadata.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = BpfMetadataValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = BpfMetadataValidationError{}
diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/health_check_sink.pb.go b/vendor/github.com/cilium/proxy/go/cilium/api/health_check_sink.pb.go
new file mode 100644
index 000000000..f96b0168d
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/cilium/api/health_check_sink.pb.go
@@ -0,0 +1,157 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.23.0
+// protoc v4.23.1
+// source: cilium/api/health_check_sink.proto
+
+package cilium
+
+import (
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// Health check event pipe sink.
+// The health check event will be streamed as binary protobufs.
+type HealthCheckEventPipeSink struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Unix domain socket path where to connect to send health check events to.
+ Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+}
+
+func (x *HealthCheckEventPipeSink) Reset() {
+ *x = HealthCheckEventPipeSink{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cilium_api_health_check_sink_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HealthCheckEventPipeSink) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthCheckEventPipeSink) ProtoMessage() {}
+
+func (x *HealthCheckEventPipeSink) ProtoReflect() protoreflect.Message {
+ mi := &file_cilium_api_health_check_sink_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthCheckEventPipeSink.ProtoReflect.Descriptor instead.
+func (*HealthCheckEventPipeSink) Descriptor() ([]byte, []int) {
+ return file_cilium_api_health_check_sink_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *HealthCheckEventPipeSink) GetPath() string {
+ if x != nil {
+ return x.Path
+ }
+ return ""
+}
+
+var File_cilium_api_health_check_sink_proto protoreflect.FileDescriptor
+
+var file_cilium_api_health_check_sink_proto_rawDesc = []byte{
+ 0x0a, 0x22, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x65, 0x61,
+ 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x73, 0x69, 0x6e, 0x6b, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x1a, 0x17, 0x76, 0x61,
+ 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x37, 0x0a, 0x18, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43,
+ 0x68, 0x65, 0x63, 0x6b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x50, 0x69, 0x70, 0x65, 0x53, 0x69, 0x6e,
+ 0x6b, 0x12, 0x1b, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x42, 0x2e,
+ 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x69, 0x6c,
+ 0x69, 0x75, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2f, 0x63, 0x69, 0x6c,
+ 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x3b, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x62, 0x06,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_cilium_api_health_check_sink_proto_rawDescOnce sync.Once
+ file_cilium_api_health_check_sink_proto_rawDescData = file_cilium_api_health_check_sink_proto_rawDesc
+)
+
+func file_cilium_api_health_check_sink_proto_rawDescGZIP() []byte {
+ file_cilium_api_health_check_sink_proto_rawDescOnce.Do(func() {
+ file_cilium_api_health_check_sink_proto_rawDescData = protoimpl.X.CompressGZIP(file_cilium_api_health_check_sink_proto_rawDescData)
+ })
+ return file_cilium_api_health_check_sink_proto_rawDescData
+}
+
+var file_cilium_api_health_check_sink_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_cilium_api_health_check_sink_proto_goTypes = []interface{}{
+ (*HealthCheckEventPipeSink)(nil), // 0: cilium.HealthCheckEventPipeSink
+}
+var file_cilium_api_health_check_sink_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_cilium_api_health_check_sink_proto_init() }
+func file_cilium_api_health_check_sink_proto_init() {
+ if File_cilium_api_health_check_sink_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_cilium_api_health_check_sink_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HealthCheckEventPipeSink); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cilium_api_health_check_sink_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cilium_api_health_check_sink_proto_goTypes,
+ DependencyIndexes: file_cilium_api_health_check_sink_proto_depIdxs,
+ MessageInfos: file_cilium_api_health_check_sink_proto_msgTypes,
+ }.Build()
+ File_cilium_api_health_check_sink_proto = out.File
+ file_cilium_api_health_check_sink_proto_rawDesc = nil
+ file_cilium_api_health_check_sink_proto_goTypes = nil
+ file_cilium_api_health_check_sink_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/health_check_sink.pb.validate.go b/vendor/github.com/cilium/proxy/go/cilium/api/health_check_sink.pb.validate.go
new file mode 100644
index 000000000..5931df1ba
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/cilium/api/health_check_sink.pb.validate.go
@@ -0,0 +1,149 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: cilium/api/health_check_sink.proto
+
+package cilium
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on HealthCheckEventPipeSink with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *HealthCheckEventPipeSink) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on HealthCheckEventPipeSink with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// HealthCheckEventPipeSinkMultiError, or nil if none found.
+func (m *HealthCheckEventPipeSink) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *HealthCheckEventPipeSink) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if utf8.RuneCountInString(m.GetPath()) < 1 {
+ err := HealthCheckEventPipeSinkValidationError{
+ field: "Path",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return HealthCheckEventPipeSinkMultiError(errors)
+ }
+
+ return nil
+}
+
+// HealthCheckEventPipeSinkMultiError is an error wrapping multiple validation
+// errors returned by HealthCheckEventPipeSink.ValidateAll() if the designated
+// constraints aren't met.
+type HealthCheckEventPipeSinkMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m HealthCheckEventPipeSinkMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m HealthCheckEventPipeSinkMultiError) AllErrors() []error { return m }
+
+// HealthCheckEventPipeSinkValidationError is the validation error returned by
+// HealthCheckEventPipeSink.Validate if the designated constraints aren't met.
+type HealthCheckEventPipeSinkValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HealthCheckEventPipeSinkValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HealthCheckEventPipeSinkValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HealthCheckEventPipeSinkValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HealthCheckEventPipeSinkValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HealthCheckEventPipeSinkValidationError) ErrorName() string {
+ return "HealthCheckEventPipeSinkValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e HealthCheckEventPipeSinkValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHealthCheckEventPipeSink.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HealthCheckEventPipeSinkValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HealthCheckEventPipeSinkValidationError{}
diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/l7policy.pb.go b/vendor/github.com/cilium/proxy/go/cilium/api/l7policy.pb.go
new file mode 100644
index 000000000..eccf77109
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/cilium/api/l7policy.pb.go
@@ -0,0 +1,167 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.23.0
+// protoc v4.23.1
+// source: cilium/api/l7policy.proto
+
+package cilium
+
+import (
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ _ "google.golang.org/protobuf/types/known/wrapperspb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+type L7Policy struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Path to the unix domain socket for the cilium access log.
+ AccessLogPath string `protobuf:"bytes,1,opt,name=access_log_path,json=accessLogPath,proto3" json:"access_log_path,omitempty"`
+ // HTTP response body message for 403 status code.
+ // If empty, "Access denied" will be used.
+ Denied_403Body string `protobuf:"bytes,3,opt,name=denied_403_body,json=denied403Body,proto3" json:"denied_403_body,omitempty"`
+}
+
+func (x *L7Policy) Reset() {
+ *x = L7Policy{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cilium_api_l7policy_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *L7Policy) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*L7Policy) ProtoMessage() {}
+
+func (x *L7Policy) ProtoReflect() protoreflect.Message {
+ mi := &file_cilium_api_l7policy_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use L7Policy.ProtoReflect.Descriptor instead.
+func (*L7Policy) Descriptor() ([]byte, []int) {
+ return file_cilium_api_l7policy_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *L7Policy) GetAccessLogPath() string {
+ if x != nil {
+ return x.AccessLogPath
+ }
+ return ""
+}
+
+func (x *L7Policy) GetDenied_403Body() string {
+ if x != nil {
+ return x.Denied_403Body
+ }
+ return ""
+}
+
+var File_cilium_api_l7policy_proto protoreflect.FileDescriptor
+
+var file_cilium_api_l7policy_proto_rawDesc = []byte{
+ 0x0a, 0x19, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x37, 0x70,
+ 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x63, 0x69, 0x6c,
+ 0x69, 0x75, 0x6d, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x22, 0x5a, 0x0a, 0x08, 0x4c, 0x37, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12,
+ 0x26, 0x0a, 0x0f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x61,
+ 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73,
+ 0x4c, 0x6f, 0x67, 0x50, 0x61, 0x74, 0x68, 0x12, 0x26, 0x0a, 0x0f, 0x64, 0x65, 0x6e, 0x69, 0x65,
+ 0x64, 0x5f, 0x34, 0x30, 0x33, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0d, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x34, 0x30, 0x33, 0x42, 0x6f, 0x64, 0x79, 0x42,
+ 0x2e, 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x69,
+ 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2f, 0x63, 0x69,
+ 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x3b, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x62,
+ 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_cilium_api_l7policy_proto_rawDescOnce sync.Once
+ file_cilium_api_l7policy_proto_rawDescData = file_cilium_api_l7policy_proto_rawDesc
+)
+
+func file_cilium_api_l7policy_proto_rawDescGZIP() []byte {
+ file_cilium_api_l7policy_proto_rawDescOnce.Do(func() {
+ file_cilium_api_l7policy_proto_rawDescData = protoimpl.X.CompressGZIP(file_cilium_api_l7policy_proto_rawDescData)
+ })
+ return file_cilium_api_l7policy_proto_rawDescData
+}
+
+var file_cilium_api_l7policy_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_cilium_api_l7policy_proto_goTypes = []interface{}{
+ (*L7Policy)(nil), // 0: cilium.L7Policy
+}
+var file_cilium_api_l7policy_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_cilium_api_l7policy_proto_init() }
+func file_cilium_api_l7policy_proto_init() {
+ if File_cilium_api_l7policy_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_cilium_api_l7policy_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*L7Policy); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cilium_api_l7policy_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cilium_api_l7policy_proto_goTypes,
+ DependencyIndexes: file_cilium_api_l7policy_proto_depIdxs,
+ MessageInfos: file_cilium_api_l7policy_proto_msgTypes,
+ }.Build()
+ File_cilium_api_l7policy_proto = out.File
+ file_cilium_api_l7policy_proto_rawDesc = nil
+ file_cilium_api_l7policy_proto_goTypes = nil
+ file_cilium_api_l7policy_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/l7policy.pb.validate.go b/vendor/github.com/cilium/proxy/go/cilium/api/l7policy.pb.validate.go
new file mode 100644
index 000000000..a9b1cc932
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/cilium/api/l7policy.pb.validate.go
@@ -0,0 +1,139 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: cilium/api/l7policy.proto
+
+package cilium
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on L7Policy with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *L7Policy) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on L7Policy with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in L7PolicyMultiError, or nil
+// if none found.
+func (m *L7Policy) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *L7Policy) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for AccessLogPath
+
+ // no validation rules for Denied_403Body
+
+ if len(errors) > 0 {
+ return L7PolicyMultiError(errors)
+ }
+
+ return nil
+}
+
+// L7PolicyMultiError is an error wrapping multiple validation errors returned
+// by L7Policy.ValidateAll() if the designated constraints aren't met.
+type L7PolicyMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m L7PolicyMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m L7PolicyMultiError) AllErrors() []error { return m }
+
+// L7PolicyValidationError is the validation error returned by
+// L7Policy.Validate if the designated constraints aren't met.
+type L7PolicyValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e L7PolicyValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e L7PolicyValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e L7PolicyValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e L7PolicyValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e L7PolicyValidationError) ErrorName() string { return "L7PolicyValidationError" }
+
+// Error satisfies the builtin error interface
+func (e L7PolicyValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sL7Policy.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = L7PolicyValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = L7PolicyValidationError{}
diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/network_filter.pb.go b/vendor/github.com/cilium/proxy/go/cilium/api/network_filter.pb.go
new file mode 100644
index 000000000..4868b7f63
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/cilium/api/network_filter.pb.go
@@ -0,0 +1,183 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.23.0
+// protoc v4.23.1
+// source: cilium/api/network_filter.proto
+
+package cilium
+
+import (
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+type NetworkFilter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Path to the proxylib to be opened
+ Proxylib string `protobuf:"bytes,1,opt,name=proxylib,proto3" json:"proxylib,omitempty"`
+ // Transparent set of parameters provided for proxylib initialization
+ ProxylibParams map[string]string `protobuf:"bytes,2,rep,name=proxylib_params,json=proxylibParams,proto3" json:"proxylib_params,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // Path to the unix domain socket for the cilium access log.
+ AccessLogPath string `protobuf:"bytes,5,opt,name=access_log_path,json=accessLogPath,proto3" json:"access_log_path,omitempty"`
+}
+
+func (x *NetworkFilter) Reset() {
+ *x = NetworkFilter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cilium_api_network_filter_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *NetworkFilter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NetworkFilter) ProtoMessage() {}
+
+func (x *NetworkFilter) ProtoReflect() protoreflect.Message {
+ mi := &file_cilium_api_network_filter_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use NetworkFilter.ProtoReflect.Descriptor instead.
+func (*NetworkFilter) Descriptor() ([]byte, []int) {
+ return file_cilium_api_network_filter_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *NetworkFilter) GetProxylib() string {
+ if x != nil {
+ return x.Proxylib
+ }
+ return ""
+}
+
+func (x *NetworkFilter) GetProxylibParams() map[string]string {
+ if x != nil {
+ return x.ProxylibParams
+ }
+ return nil
+}
+
+func (x *NetworkFilter) GetAccessLogPath() string {
+ if x != nil {
+ return x.AccessLogPath
+ }
+ return ""
+}
+
+var File_cilium_api_network_filter_proto protoreflect.FileDescriptor
+
+var file_cilium_api_network_filter_proto_rawDesc = []byte{
+ 0x0a, 0x1f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6e, 0x65, 0x74,
+ 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x12, 0x06, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x22, 0xea, 0x01, 0x0a, 0x0d, 0x4e, 0x65,
+ 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x70,
+ 0x72, 0x6f, 0x78, 0x79, 0x6c, 0x69, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70,
+ 0x72, 0x6f, 0x78, 0x79, 0x6c, 0x69, 0x62, 0x12, 0x52, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x78, 0x79,
+ 0x6c, 0x69, 0x62, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x29, 0x2e, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72,
+ 0x6b, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x6c, 0x69, 0x62,
+ 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x70, 0x72, 0x6f,
+ 0x78, 0x79, 0x6c, 0x69, 0x62, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x61,
+ 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x50,
+ 0x61, 0x74, 0x68, 0x1a, 0x41, 0x0a, 0x13, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x6c, 0x69, 0x62, 0x50,
+ 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
+ 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x2e, 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x78,
+ 0x79, 0x2f, 0x67, 0x6f, 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x3b,
+ 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_cilium_api_network_filter_proto_rawDescOnce sync.Once
+ file_cilium_api_network_filter_proto_rawDescData = file_cilium_api_network_filter_proto_rawDesc
+)
+
+func file_cilium_api_network_filter_proto_rawDescGZIP() []byte {
+ file_cilium_api_network_filter_proto_rawDescOnce.Do(func() {
+ file_cilium_api_network_filter_proto_rawDescData = protoimpl.X.CompressGZIP(file_cilium_api_network_filter_proto_rawDescData)
+ })
+ return file_cilium_api_network_filter_proto_rawDescData
+}
+
+var file_cilium_api_network_filter_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_cilium_api_network_filter_proto_goTypes = []interface{}{
+ (*NetworkFilter)(nil), // 0: cilium.NetworkFilter
+ nil, // 1: cilium.NetworkFilter.ProxylibParamsEntry
+}
+var file_cilium_api_network_filter_proto_depIdxs = []int32{
+ 1, // 0: cilium.NetworkFilter.proxylib_params:type_name -> cilium.NetworkFilter.ProxylibParamsEntry
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_cilium_api_network_filter_proto_init() }
+func file_cilium_api_network_filter_proto_init() {
+ if File_cilium_api_network_filter_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_cilium_api_network_filter_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NetworkFilter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cilium_api_network_filter_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cilium_api_network_filter_proto_goTypes,
+ DependencyIndexes: file_cilium_api_network_filter_proto_depIdxs,
+ MessageInfos: file_cilium_api_network_filter_proto_msgTypes,
+ }.Build()
+ File_cilium_api_network_filter_proto = out.File
+ file_cilium_api_network_filter_proto_rawDesc = nil
+ file_cilium_api_network_filter_proto_goTypes = nil
+ file_cilium_api_network_filter_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/network_filter.pb.validate.go b/vendor/github.com/cilium/proxy/go/cilium/api/network_filter.pb.validate.go
new file mode 100644
index 000000000..fec4b8b71
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/cilium/api/network_filter.pb.validate.go
@@ -0,0 +1,142 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: cilium/api/network_filter.proto
+
+package cilium
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on NetworkFilter with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *NetworkFilter) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on NetworkFilter with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in NetworkFilterMultiError, or
+// nil if none found.
+func (m *NetworkFilter) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *NetworkFilter) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Proxylib
+
+ // no validation rules for ProxylibParams
+
+ // no validation rules for AccessLogPath
+
+ if len(errors) > 0 {
+ return NetworkFilterMultiError(errors)
+ }
+
+ return nil
+}
+
+// NetworkFilterMultiError is an error wrapping multiple validation errors
+// returned by NetworkFilter.ValidateAll() if the designated constraints
+// aren't met.
+type NetworkFilterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m NetworkFilterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m NetworkFilterMultiError) AllErrors() []error { return m }
+
+// NetworkFilterValidationError is the validation error returned by
+// NetworkFilter.Validate if the designated constraints aren't met.
+type NetworkFilterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e NetworkFilterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e NetworkFilterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e NetworkFilterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e NetworkFilterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e NetworkFilterValidationError) ErrorName() string { return "NetworkFilterValidationError" }
+
+// Error satisfies the builtin error interface
+func (e NetworkFilterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sNetworkFilter.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = NetworkFilterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = NetworkFilterValidationError{}
diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/npds.pb.go b/vendor/github.com/cilium/proxy/go/cilium/api/npds.pb.go
new file mode 100644
index 000000000..150663120
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/cilium/api/npds.pb.go
@@ -0,0 +1,1761 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.23.0
+// protoc v4.23.1
+// source: cilium/api/npds.proto
+
+package cilium
+
+import (
+ context "context"
+ _ "github.com/cilium/proxy/go/envoy/annotations"
+ v3 "github.com/cilium/proxy/go/envoy/config/core/v3"
+ v31 "github.com/cilium/proxy/go/envoy/config/route/v3"
+ v33 "github.com/cilium/proxy/go/envoy/service/discovery/v3"
+ v32 "github.com/cilium/proxy/go/envoy/type/matcher/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// Action specifies what to do when the header matches.
+type HeaderMatch_MatchAction int32
+
+const (
+ HeaderMatch_CONTINUE_ON_MATCH HeaderMatch_MatchAction = 0 // Keep checking other matches (default)
+ HeaderMatch_FAIL_ON_MATCH HeaderMatch_MatchAction = 1 // Drop the request if no other rule matches
+ HeaderMatch_DELETE_ON_MATCH HeaderMatch_MatchAction = 2 // Remove the whole matching header
+)
+
+// Enum value maps for HeaderMatch_MatchAction.
+var (
+ HeaderMatch_MatchAction_name = map[int32]string{
+ 0: "CONTINUE_ON_MATCH",
+ 1: "FAIL_ON_MATCH",
+ 2: "DELETE_ON_MATCH",
+ }
+ HeaderMatch_MatchAction_value = map[string]int32{
+ "CONTINUE_ON_MATCH": 0,
+ "FAIL_ON_MATCH": 1,
+ "DELETE_ON_MATCH": 2,
+ }
+)
+
+func (x HeaderMatch_MatchAction) Enum() *HeaderMatch_MatchAction {
+ p := new(HeaderMatch_MatchAction)
+ *p = x
+ return p
+}
+
+func (x HeaderMatch_MatchAction) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (HeaderMatch_MatchAction) Descriptor() protoreflect.EnumDescriptor {
+ return file_cilium_api_npds_proto_enumTypes[0].Descriptor()
+}
+
+func (HeaderMatch_MatchAction) Type() protoreflect.EnumType {
+ return &file_cilium_api_npds_proto_enumTypes[0]
+}
+
+func (x HeaderMatch_MatchAction) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use HeaderMatch_MatchAction.Descriptor instead.
+func (HeaderMatch_MatchAction) EnumDescriptor() ([]byte, []int) {
+ return file_cilium_api_npds_proto_rawDescGZIP(), []int{5, 0}
+}
+
+type HeaderMatch_MismatchAction int32
+
+const (
+ HeaderMatch_FAIL_ON_MISMATCH HeaderMatch_MismatchAction = 0 // Drop the request if no other rule matches (default)
+ HeaderMatch_CONTINUE_ON_MISMATCH HeaderMatch_MismatchAction = 1 // Keep checking other matches, log the mismatch
+ HeaderMatch_ADD_ON_MISMATCH HeaderMatch_MismatchAction = 2 // Add 'value' to the multivalued header
+ HeaderMatch_DELETE_ON_MISMATCH HeaderMatch_MismatchAction = 3 // Remove the whole mismatching header
+ HeaderMatch_REPLACE_ON_MISMATCH HeaderMatch_MismatchAction = 4 // Replace the whole mismatching header with 'value'
+)
+
+// Enum value maps for HeaderMatch_MismatchAction.
+var (
+ HeaderMatch_MismatchAction_name = map[int32]string{
+ 0: "FAIL_ON_MISMATCH",
+ 1: "CONTINUE_ON_MISMATCH",
+ 2: "ADD_ON_MISMATCH",
+ 3: "DELETE_ON_MISMATCH",
+ 4: "REPLACE_ON_MISMATCH",
+ }
+ HeaderMatch_MismatchAction_value = map[string]int32{
+ "FAIL_ON_MISMATCH": 0,
+ "CONTINUE_ON_MISMATCH": 1,
+ "ADD_ON_MISMATCH": 2,
+ "DELETE_ON_MISMATCH": 3,
+ "REPLACE_ON_MISMATCH": 4,
+ }
+)
+
+func (x HeaderMatch_MismatchAction) Enum() *HeaderMatch_MismatchAction {
+ p := new(HeaderMatch_MismatchAction)
+ *p = x
+ return p
+}
+
+func (x HeaderMatch_MismatchAction) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (HeaderMatch_MismatchAction) Descriptor() protoreflect.EnumDescriptor {
+ return file_cilium_api_npds_proto_enumTypes[1].Descriptor()
+}
+
+func (HeaderMatch_MismatchAction) Type() protoreflect.EnumType {
+ return &file_cilium_api_npds_proto_enumTypes[1]
+}
+
+func (x HeaderMatch_MismatchAction) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use HeaderMatch_MismatchAction.Descriptor instead.
+func (HeaderMatch_MismatchAction) EnumDescriptor() ([]byte, []int) {
+ return file_cilium_api_npds_proto_rawDescGZIP(), []int{5, 1}
+}
+
+// A network policy that is enforced by a filter on the network flows to/from
+// associated hosts.
+type NetworkPolicy struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // IPs of the endpoint to which this policy applies.
+ // Required.
+ EndpointIps []string `protobuf:"bytes,1,rep,name=endpoint_ips,json=endpointIps,proto3" json:"endpoint_ips,omitempty"`
+ // The endpoint identifier associated with the network policy.
+ // Required.
+ EndpointId uint64 `protobuf:"varint,2,opt,name=endpoint_id,json=endpointId,proto3" json:"endpoint_id,omitempty"`
+ // The part of the policy to be enforced at ingress by the filter, as a set
+ // of per-port network policies, one per destination L4 port.
+ // Every PortNetworkPolicy element in this set has a unique port / protocol
+ // combination.
+ // Optional. If empty, all flows in this direction are denied.
+ IngressPerPortPolicies []*PortNetworkPolicy `protobuf:"bytes,3,rep,name=ingress_per_port_policies,json=ingressPerPortPolicies,proto3" json:"ingress_per_port_policies,omitempty"`
+ // The part of the policy to be enforced at egress by the filter, as a set
+ // of per-port network policies, one per destination L4 port.
+ // Every PortNetworkPolicy element in this set has a unique port / protocol
+ // combination.
+ // Optional. If empty, all flows in this direction are denied.
+ EgressPerPortPolicies []*PortNetworkPolicy `protobuf:"bytes,4,rep,name=egress_per_port_policies,json=egressPerPortPolicies,proto3" json:"egress_per_port_policies,omitempty"`
+ // Name of the conntrack map to use with this policy.
+ // The paths to various Cilium conntrack maps are derived using this name.
+ // Optional. If empty, ipcache or hostmap lookup is used instead of conntrack
+ // map.
+ ConntrackMapName string `protobuf:"bytes,5,opt,name=conntrack_map_name,json=conntrackMapName,proto3" json:"conntrack_map_name,omitempty"`
+}
+
+func (x *NetworkPolicy) Reset() {
+ *x = NetworkPolicy{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cilium_api_npds_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *NetworkPolicy) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NetworkPolicy) ProtoMessage() {}
+
+func (x *NetworkPolicy) ProtoReflect() protoreflect.Message {
+ mi := &file_cilium_api_npds_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use NetworkPolicy.ProtoReflect.Descriptor instead.
+func (*NetworkPolicy) Descriptor() ([]byte, []int) {
+ return file_cilium_api_npds_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *NetworkPolicy) GetEndpointIps() []string {
+ if x != nil {
+ return x.EndpointIps
+ }
+ return nil
+}
+
+func (x *NetworkPolicy) GetEndpointId() uint64 {
+ if x != nil {
+ return x.EndpointId
+ }
+ return 0
+}
+
+func (x *NetworkPolicy) GetIngressPerPortPolicies() []*PortNetworkPolicy {
+ if x != nil {
+ return x.IngressPerPortPolicies
+ }
+ return nil
+}
+
+func (x *NetworkPolicy) GetEgressPerPortPolicies() []*PortNetworkPolicy {
+ if x != nil {
+ return x.EgressPerPortPolicies
+ }
+ return nil
+}
+
+func (x *NetworkPolicy) GetConntrackMapName() string {
+ if x != nil {
+ return x.ConntrackMapName
+ }
+ return ""
+}
+
+// A network policy to whitelist flows to a specific destination L4 port,
+// as a conjunction of predicates on L3/L4/L7 flows.
+// If all the predicates of a policy match a flow, the flow is whitelisted.
+type PortNetworkPolicy struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The flows' destination L4 port number, as an unsigned 16-bit integer.
+ // If 0, all destination L4 port numbers are matched by this predicate.
+ Port uint32 `protobuf:"varint,1,opt,name=port,proto3" json:"port,omitempty"`
+ // The end of the destination port range, if non-zero.
+ EndPort uint32 `protobuf:"varint,4,opt,name=end_port,json=endPort,proto3" json:"end_port,omitempty"`
+ // The flows' L4 transport protocol.
+ // Required.
+ Protocol v3.SocketAddress_Protocol `protobuf:"varint,2,opt,name=protocol,proto3,enum=envoy.config.core.v3.SocketAddress_Protocol" json:"protocol,omitempty"`
+ // The network policy rules to be enforced on the flows to the port.
+ // Optional. A flow is matched by this predicate if either the set of
+ // rules is empty or any of the rules matches it.
+ Rules []*PortNetworkPolicyRule `protobuf:"bytes,3,rep,name=rules,proto3" json:"rules,omitempty"`
+}
+
+func (x *PortNetworkPolicy) Reset() {
+ *x = PortNetworkPolicy{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cilium_api_npds_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PortNetworkPolicy) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PortNetworkPolicy) ProtoMessage() {}
+
+func (x *PortNetworkPolicy) ProtoReflect() protoreflect.Message {
+ mi := &file_cilium_api_npds_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PortNetworkPolicy.ProtoReflect.Descriptor instead.
+func (*PortNetworkPolicy) Descriptor() ([]byte, []int) {
+ return file_cilium_api_npds_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *PortNetworkPolicy) GetPort() uint32 {
+ if x != nil {
+ return x.Port
+ }
+ return 0
+}
+
+func (x *PortNetworkPolicy) GetEndPort() uint32 {
+ if x != nil {
+ return x.EndPort
+ }
+ return 0
+}
+
+func (x *PortNetworkPolicy) GetProtocol() v3.SocketAddress_Protocol {
+ if x != nil {
+ return x.Protocol
+ }
+ return v3.SocketAddress_TCP
+}
+
+func (x *PortNetworkPolicy) GetRules() []*PortNetworkPolicyRule {
+ if x != nil {
+ return x.Rules
+ }
+ return nil
+}
+
+type TLSContext struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // CA certificates. If present, the counterparty must provide a valid
+ // certificate.
+ // Deprecated, use 'validation_context_sds_secret' instead.
+ TrustedCa string `protobuf:"bytes,1,opt,name=trusted_ca,json=trustedCa,proto3" json:"trusted_ca,omitempty"`
+ // Certificate chain.
+ // Deprecated, use 'tls_sds_secret' instead.
+ CertificateChain string `protobuf:"bytes,2,opt,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"`
+ // Private key
+ // Deprecated, use 'tls_sds_secret' instead.
+ PrivateKey string `protobuf:"bytes,3,opt,name=private_key,json=privateKey,proto3" json:"private_key,omitempty"`
+ // Server Name Indicator. For downstream this helps choose the certificate to
+ // present to the client. For upstream this will be used as the SNI on the
+ // client connection.
+ ServerNames []string `protobuf:"bytes,4,rep,name=server_names,json=serverNames,proto3" json:"server_names,omitempty"`
+ // Name of an SDS secret for CA certificates. Secret is fetched from the same gRPC source as
+ // this Network Policy. If present, the counterparty must provide a valid certificate.
+ // May not be used at the same time with 'trusted_ca'.
+ ValidationContextSdsSecret string `protobuf:"bytes,5,opt,name=validation_context_sds_secret,json=validationContextSdsSecret,proto3" json:"validation_context_sds_secret,omitempty"`
+ // Name of an SDS secret for both TLS private key and certificate chain. Secret is fetched
+ // from the same gRPC source as this Network Policy.
+ // May not be used at the same time with 'certificate_chain' or 'private_key'.
+ TlsSdsSecret string `protobuf:"bytes,6,opt,name=tls_sds_secret,json=tlsSdsSecret,proto3" json:"tls_sds_secret,omitempty"`
+}
+
+func (x *TLSContext) Reset() {
+ *x = TLSContext{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cilium_api_npds_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TLSContext) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TLSContext) ProtoMessage() {}
+
+func (x *TLSContext) ProtoReflect() protoreflect.Message {
+ mi := &file_cilium_api_npds_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TLSContext.ProtoReflect.Descriptor instead.
+func (*TLSContext) Descriptor() ([]byte, []int) {
+ return file_cilium_api_npds_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *TLSContext) GetTrustedCa() string {
+ if x != nil {
+ return x.TrustedCa
+ }
+ return ""
+}
+
+func (x *TLSContext) GetCertificateChain() string {
+ if x != nil {
+ return x.CertificateChain
+ }
+ return ""
+}
+
+func (x *TLSContext) GetPrivateKey() string {
+ if x != nil {
+ return x.PrivateKey
+ }
+ return ""
+}
+
+func (x *TLSContext) GetServerNames() []string {
+ if x != nil {
+ return x.ServerNames
+ }
+ return nil
+}
+
+func (x *TLSContext) GetValidationContextSdsSecret() string {
+ if x != nil {
+ return x.ValidationContextSdsSecret
+ }
+ return ""
+}
+
+func (x *TLSContext) GetTlsSdsSecret() string {
+ if x != nil {
+ return x.TlsSdsSecret
+ }
+ return ""
+}
+
+// A network policy rule, as a conjunction of predicates on L3/L7 flows.
+// If all the predicates of a rule match a flow, the flow is matched by the
+// rule.
+type PortNetworkPolicyRule struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Optional name for the rule, can be used in logging and error messages.
+ Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"`
+ // The set of identifiers of policies of remote hosts.
+ // A flow is matched by this predicate if the identifier of the policy
+ // applied on the flow's remote host is contained in this set.
+ // Optional. If not specified, any remote host is matched by this predicate.
+ // This field is deprecated, use remote_policies instead.
+ // TODO: Remove when Cilium 1.14 no longer supported.
+ DeprecatedRemotePolicies_64 []uint64 `protobuf:"varint,1,rep,packed,name=deprecated_remote_policies_64,json=deprecatedRemotePolicies64,proto3" json:"deprecated_remote_policies_64,omitempty"`
+ RemotePolicies []uint32 `protobuf:"varint,7,rep,packed,name=remote_policies,json=remotePolicies,proto3" json:"remote_policies,omitempty"`
+ // Optional downstream TLS context. If present, the incoming connection must
+ // be a TLS connection.
+ DownstreamTlsContext *TLSContext `protobuf:"bytes,3,opt,name=downstream_tls_context,json=downstreamTlsContext,proto3" json:"downstream_tls_context,omitempty"`
+ // Optional upstream TLS context. If present, the outgoing connection will use
+ // TLS.
+ UpstreamTlsContext *TLSContext `protobuf:"bytes,4,opt,name=upstream_tls_context,json=upstreamTlsContext,proto3" json:"upstream_tls_context,omitempty"`
+ // Optional allowed SNIs in TLS handshake.
+ ServerNames []string `protobuf:"bytes,6,rep,name=server_names,json=serverNames,proto3" json:"server_names,omitempty"`
+ // Optional L7 protocol parser name. This is only used if the parser is not
+ // one of the well knows ones. If specified, the l7 parser having this name
+ // needs to be built in to libcilium.so.
+ L7Proto string `protobuf:"bytes,2,opt,name=l7_proto,json=l7Proto,proto3" json:"l7_proto,omitempty"`
+ // Optional. If not specified, any L7 request is matched by this predicate.
+ // All rules on any given port must have the same type of L7 rules!
+ //
+ // Types that are assignable to L7:
+ //
+ // *PortNetworkPolicyRule_HttpRules
+ // *PortNetworkPolicyRule_KafkaRules
+ // *PortNetworkPolicyRule_L7Rules
+ L7 isPortNetworkPolicyRule_L7 `protobuf_oneof:"l7"`
+}
+
+func (x *PortNetworkPolicyRule) Reset() {
+ *x = PortNetworkPolicyRule{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cilium_api_npds_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PortNetworkPolicyRule) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PortNetworkPolicyRule) ProtoMessage() {}
+
+func (x *PortNetworkPolicyRule) ProtoReflect() protoreflect.Message {
+ mi := &file_cilium_api_npds_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PortNetworkPolicyRule.ProtoReflect.Descriptor instead.
+func (*PortNetworkPolicyRule) Descriptor() ([]byte, []int) {
+ return file_cilium_api_npds_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *PortNetworkPolicyRule) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *PortNetworkPolicyRule) GetDeprecatedRemotePolicies_64() []uint64 {
+ if x != nil {
+ return x.DeprecatedRemotePolicies_64
+ }
+ return nil
+}
+
+func (x *PortNetworkPolicyRule) GetRemotePolicies() []uint32 {
+ if x != nil {
+ return x.RemotePolicies
+ }
+ return nil
+}
+
+func (x *PortNetworkPolicyRule) GetDownstreamTlsContext() *TLSContext {
+ if x != nil {
+ return x.DownstreamTlsContext
+ }
+ return nil
+}
+
+func (x *PortNetworkPolicyRule) GetUpstreamTlsContext() *TLSContext {
+ if x != nil {
+ return x.UpstreamTlsContext
+ }
+ return nil
+}
+
+func (x *PortNetworkPolicyRule) GetServerNames() []string {
+ if x != nil {
+ return x.ServerNames
+ }
+ return nil
+}
+
+func (x *PortNetworkPolicyRule) GetL7Proto() string {
+ if x != nil {
+ return x.L7Proto
+ }
+ return ""
+}
+
+func (m *PortNetworkPolicyRule) GetL7() isPortNetworkPolicyRule_L7 {
+ if m != nil {
+ return m.L7
+ }
+ return nil
+}
+
+func (x *PortNetworkPolicyRule) GetHttpRules() *HttpNetworkPolicyRules {
+ if x, ok := x.GetL7().(*PortNetworkPolicyRule_HttpRules); ok {
+ return x.HttpRules
+ }
+ return nil
+}
+
+func (x *PortNetworkPolicyRule) GetKafkaRules() *KafkaNetworkPolicyRules {
+ if x, ok := x.GetL7().(*PortNetworkPolicyRule_KafkaRules); ok {
+ return x.KafkaRules
+ }
+ return nil
+}
+
+func (x *PortNetworkPolicyRule) GetL7Rules() *L7NetworkPolicyRules {
+ if x, ok := x.GetL7().(*PortNetworkPolicyRule_L7Rules); ok {
+ return x.L7Rules
+ }
+ return nil
+}
+
+type isPortNetworkPolicyRule_L7 interface {
+ isPortNetworkPolicyRule_L7()
+}
+
+type PortNetworkPolicyRule_HttpRules struct {
+ // The set of HTTP network policy rules.
+ // An HTTP request is matched by this predicate if any of its rules matches
+ // the request.
+ HttpRules *HttpNetworkPolicyRules `protobuf:"bytes,100,opt,name=http_rules,json=httpRules,proto3,oneof"`
+}
+
+type PortNetworkPolicyRule_KafkaRules struct {
+ // The set of Kafka network policy rules.
+ // A Kafka request is matched by this predicate if any of its rules matches
+ // the request.
+ KafkaRules *KafkaNetworkPolicyRules `protobuf:"bytes,101,opt,name=kafka_rules,json=kafkaRules,proto3,oneof"`
+}
+
+type PortNetworkPolicyRule_L7Rules struct {
+ // Set of Generic policy rules used when 'l7_proto' is defined.
+ // Only to be used for l7 protocols for which a specific oneof
+ // is not defined
+ L7Rules *L7NetworkPolicyRules `protobuf:"bytes,102,opt,name=l7_rules,json=l7Rules,proto3,oneof"`
+}
+
+func (*PortNetworkPolicyRule_HttpRules) isPortNetworkPolicyRule_L7() {}
+
+func (*PortNetworkPolicyRule_KafkaRules) isPortNetworkPolicyRule_L7() {}
+
+func (*PortNetworkPolicyRule_L7Rules) isPortNetworkPolicyRule_L7() {}
+
+// A set of network policy rules that match HTTP requests.
+type HttpNetworkPolicyRules struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The set of HTTP network policy rules.
+ // An HTTP request is matched if any of its rules matches the request.
+ // Required and may not be empty.
+ HttpRules []*HttpNetworkPolicyRule `protobuf:"bytes,1,rep,name=http_rules,json=httpRules,proto3" json:"http_rules,omitempty"`
+}
+
+func (x *HttpNetworkPolicyRules) Reset() {
+ *x = HttpNetworkPolicyRules{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cilium_api_npds_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HttpNetworkPolicyRules) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HttpNetworkPolicyRules) ProtoMessage() {}
+
+func (x *HttpNetworkPolicyRules) ProtoReflect() protoreflect.Message {
+ mi := &file_cilium_api_npds_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HttpNetworkPolicyRules.ProtoReflect.Descriptor instead.
+func (*HttpNetworkPolicyRules) Descriptor() ([]byte, []int) {
+ return file_cilium_api_npds_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *HttpNetworkPolicyRules) GetHttpRules() []*HttpNetworkPolicyRule {
+ if x != nil {
+ return x.HttpRules
+ }
+ return nil
+}
+
+type HeaderMatch struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` // empty for presence match. For secret data use 'value_sds_secret' instead.
+ MatchAction HeaderMatch_MatchAction `protobuf:"varint,3,opt,name=match_action,json=matchAction,proto3,enum=cilium.HeaderMatch_MatchAction" json:"match_action,omitempty"`
+ MismatchAction HeaderMatch_MismatchAction `protobuf:"varint,4,opt,name=mismatch_action,json=mismatchAction,proto3,enum=cilium.HeaderMatch_MismatchAction" json:"mismatch_action,omitempty"`
+ // Generic secret name for fetching value via SDS. Secret is fetched from the same gRPC source as
+ // this Network Policy.
+ ValueSdsSecret string `protobuf:"bytes,5,opt,name=value_sds_secret,json=valueSdsSecret,proto3" json:"value_sds_secret,omitempty"`
+}
+
+func (x *HeaderMatch) Reset() {
+ *x = HeaderMatch{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cilium_api_npds_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HeaderMatch) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HeaderMatch) ProtoMessage() {}
+
+func (x *HeaderMatch) ProtoReflect() protoreflect.Message {
+ mi := &file_cilium_api_npds_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HeaderMatch.ProtoReflect.Descriptor instead.
+func (*HeaderMatch) Descriptor() ([]byte, []int) {
+ return file_cilium_api_npds_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *HeaderMatch) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *HeaderMatch) GetValue() string {
+ if x != nil {
+ return x.Value
+ }
+ return ""
+}
+
+func (x *HeaderMatch) GetMatchAction() HeaderMatch_MatchAction {
+ if x != nil {
+ return x.MatchAction
+ }
+ return HeaderMatch_CONTINUE_ON_MATCH
+}
+
+func (x *HeaderMatch) GetMismatchAction() HeaderMatch_MismatchAction {
+ if x != nil {
+ return x.MismatchAction
+ }
+ return HeaderMatch_FAIL_ON_MISMATCH
+}
+
+func (x *HeaderMatch) GetValueSdsSecret() string {
+ if x != nil {
+ return x.ValueSdsSecret
+ }
+ return ""
+}
+
+// An HTTP network policy rule, as a conjunction of predicates on HTTP requests.
+// If all the predicates of a rule match an HTTP request, the request is
+// allowed. Otherwise, it is denied.
+type HttpNetworkPolicyRule struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A set of matchers on the HTTP request's headers' names and values.
+ // If all the matchers in this set match an HTTP request, the request is
+ // allowed by this rule. Otherwise, it is denied.
+ //
+ // Some special header names are:
+ //
+ // * *:uri*: The HTTP request's URI.
+ // * *:method*: The HTTP request's method.
+ // * *:authority*: Also maps to the HTTP 1.1 *Host* header.
+ //
+ // Optional. If empty, matches any HTTP request.
+ Headers []*v31.HeaderMatcher `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"`
+ // header_matches is a set of HTTP header name and value pairs that
+ // will be matched against the request headers, if all the other match
+ // requirements in 'headers' are met. Each HeaderAction determines what to do
+ // when there is a match or mismatch.
+ //
+ // Optional.
+ HeaderMatches []*HeaderMatch `protobuf:"bytes,2,rep,name=header_matches,json=headerMatches,proto3" json:"header_matches,omitempty"`
+}
+
+func (x *HttpNetworkPolicyRule) Reset() {
+ *x = HttpNetworkPolicyRule{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cilium_api_npds_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HttpNetworkPolicyRule) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HttpNetworkPolicyRule) ProtoMessage() {}
+
+func (x *HttpNetworkPolicyRule) ProtoReflect() protoreflect.Message {
+ mi := &file_cilium_api_npds_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HttpNetworkPolicyRule.ProtoReflect.Descriptor instead.
+func (*HttpNetworkPolicyRule) Descriptor() ([]byte, []int) {
+ return file_cilium_api_npds_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *HttpNetworkPolicyRule) GetHeaders() []*v31.HeaderMatcher {
+ if x != nil {
+ return x.Headers
+ }
+ return nil
+}
+
+func (x *HttpNetworkPolicyRule) GetHeaderMatches() []*HeaderMatch {
+ if x != nil {
+ return x.HeaderMatches
+ }
+ return nil
+}
+
+// A set of network policy rules that match Kafka requests.
+type KafkaNetworkPolicyRules struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The set of Kafka network policy rules.
+ // A Kafka request is matched if any of its rules matches the request.
+ // Required and may not be empty.
+ KafkaRules []*KafkaNetworkPolicyRule `protobuf:"bytes,1,rep,name=kafka_rules,json=kafkaRules,proto3" json:"kafka_rules,omitempty"`
+}
+
+func (x *KafkaNetworkPolicyRules) Reset() {
+ *x = KafkaNetworkPolicyRules{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cilium_api_npds_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *KafkaNetworkPolicyRules) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*KafkaNetworkPolicyRules) ProtoMessage() {}
+
+func (x *KafkaNetworkPolicyRules) ProtoReflect() protoreflect.Message {
+ mi := &file_cilium_api_npds_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use KafkaNetworkPolicyRules.ProtoReflect.Descriptor instead.
+func (*KafkaNetworkPolicyRules) Descriptor() ([]byte, []int) {
+ return file_cilium_api_npds_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *KafkaNetworkPolicyRules) GetKafkaRules() []*KafkaNetworkPolicyRule {
+ if x != nil {
+ return x.KafkaRules
+ }
+ return nil
+}
+
+// A Kafka network policy rule, as a conjunction of predicates on Kafka
+// requests. If all the predicates of a rule match a Kafka request, the request
+// is allowed. Otherwise, it is denied.
+type KafkaNetworkPolicyRule struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The Kafka request's API version.
+ // If < 0, all Kafka requests are matched by this predicate.
+ ApiVersion int32 `protobuf:"varint,1,opt,name=api_version,json=apiVersion,proto3" json:"api_version,omitempty"`
+ // Set of allowed API keys in the Kafka request.
+ // If none, all Kafka requests are matched by this predicate.
+ ApiKeys []int32 `protobuf:"varint,2,rep,packed,name=api_keys,json=apiKeys,proto3" json:"api_keys,omitempty"`
+ // The Kafka request's client ID.
+ // Optional. If not specified, all Kafka requests are matched by this
+ // predicate. If specified, this predicates only matches requests that contain
+ // this client ID, and never matches requests that don't contain any client
+ // ID.
+ ClientId string `protobuf:"bytes,3,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"`
+ // The Kafka request's topic.
+ // Optional. If not specified, this rule will not consider the Kafka request's
+ // topics. If specified, this predicates only matches requests that contain
+ // this topic, and never matches requests that don't contain any topic.
+ // However, messages that can not contain a topic will also me matched.
+ Topic string `protobuf:"bytes,4,opt,name=topic,proto3" json:"topic,omitempty"`
+}
+
+func (x *KafkaNetworkPolicyRule) Reset() {
+ *x = KafkaNetworkPolicyRule{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cilium_api_npds_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *KafkaNetworkPolicyRule) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*KafkaNetworkPolicyRule) ProtoMessage() {}
+
+func (x *KafkaNetworkPolicyRule) ProtoReflect() protoreflect.Message {
+ mi := &file_cilium_api_npds_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use KafkaNetworkPolicyRule.ProtoReflect.Descriptor instead.
+func (*KafkaNetworkPolicyRule) Descriptor() ([]byte, []int) {
+ return file_cilium_api_npds_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *KafkaNetworkPolicyRule) GetApiVersion() int32 {
+ if x != nil {
+ return x.ApiVersion
+ }
+ return 0
+}
+
+func (x *KafkaNetworkPolicyRule) GetApiKeys() []int32 {
+ if x != nil {
+ return x.ApiKeys
+ }
+ return nil
+}
+
+func (x *KafkaNetworkPolicyRule) GetClientId() string {
+ if x != nil {
+ return x.ClientId
+ }
+ return ""
+}
+
+func (x *KafkaNetworkPolicyRule) GetTopic() string {
+ if x != nil {
+ return x.Topic
+ }
+ return ""
+}
+
+// A set of network policy rules that match generic L7 requests.
+type L7NetworkPolicyRules struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The set of allowing l7 policy rules.
+ // A request is allowed if any of these rules matches the request,
+ // and the request does not match any of the deny rules.
+ // Optional. If missing or empty then all requests are allowed, unless
+ // denied by a deny rule.
+ L7AllowRules []*L7NetworkPolicyRule `protobuf:"bytes,1,rep,name=l7_allow_rules,json=l7AllowRules,proto3" json:"l7_allow_rules,omitempty"`
+ // The set of denying l7 policy rules.
+ // A request is denied if any of these rules matches the request.
+ // A request that is not denied may be allowed by 'l7_allow_rules'.
+ // Optional.
+ L7DenyRules []*L7NetworkPolicyRule `protobuf:"bytes,2,rep,name=l7_deny_rules,json=l7DenyRules,proto3" json:"l7_deny_rules,omitempty"`
+}
+
+func (x *L7NetworkPolicyRules) Reset() {
+ *x = L7NetworkPolicyRules{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cilium_api_npds_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *L7NetworkPolicyRules) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*L7NetworkPolicyRules) ProtoMessage() {}
+
+func (x *L7NetworkPolicyRules) ProtoReflect() protoreflect.Message {
+ mi := &file_cilium_api_npds_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use L7NetworkPolicyRules.ProtoReflect.Descriptor instead.
+func (*L7NetworkPolicyRules) Descriptor() ([]byte, []int) {
+ return file_cilium_api_npds_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *L7NetworkPolicyRules) GetL7AllowRules() []*L7NetworkPolicyRule {
+ if x != nil {
+ return x.L7AllowRules
+ }
+ return nil
+}
+
+func (x *L7NetworkPolicyRules) GetL7DenyRules() []*L7NetworkPolicyRule {
+ if x != nil {
+ return x.L7DenyRules
+ }
+ return nil
+}
+
+// A generic L7 policy rule, as a conjunction of predicates on l7 requests.
+// If all the predicates of a rule match a request, the request is allowed.
+// Otherwise, it is denied.
+type L7NetworkPolicyRule struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Optional rule name, can be used in logging and error messages.
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ // Generic rule for Go extensions.
+ // Optional. If empty, matches any request. Not allowed if 'metadata_rule' is
+ // present.
+ Rule map[string]string `protobuf:"bytes,1,rep,name=rule,proto3" json:"rule,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // Generic rule for Envoy metadata enforcement. All matchers must match for
+ // the rule to allow the request/connection. Optional. If empty, matches any
+ // request. Not allowed if 'rule' is present.
+ MetadataRule []*v32.MetadataMatcher `protobuf:"bytes,2,rep,name=metadata_rule,json=metadataRule,proto3" json:"metadata_rule,omitempty"`
+}
+
+func (x *L7NetworkPolicyRule) Reset() {
+ *x = L7NetworkPolicyRule{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cilium_api_npds_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *L7NetworkPolicyRule) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*L7NetworkPolicyRule) ProtoMessage() {}
+
+func (x *L7NetworkPolicyRule) ProtoReflect() protoreflect.Message {
+ mi := &file_cilium_api_npds_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use L7NetworkPolicyRule.ProtoReflect.Descriptor instead.
+func (*L7NetworkPolicyRule) Descriptor() ([]byte, []int) {
+ return file_cilium_api_npds_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *L7NetworkPolicyRule) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *L7NetworkPolicyRule) GetRule() map[string]string {
+ if x != nil {
+ return x.Rule
+ }
+ return nil
+}
+
+func (x *L7NetworkPolicyRule) GetMetadataRule() []*v32.MetadataMatcher {
+ if x != nil {
+ return x.MetadataRule
+ }
+ return nil
+}
+
+// Cilium's network policy manager fills this message with all currently known network policies.
+type NetworkPoliciesConfigDump struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The loaded networkpolicy configs.
+ Networkpolicies []*NetworkPolicy `protobuf:"bytes,1,rep,name=networkpolicies,proto3" json:"networkpolicies,omitempty"`
+}
+
+func (x *NetworkPoliciesConfigDump) Reset() {
+ *x = NetworkPoliciesConfigDump{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cilium_api_npds_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *NetworkPoliciesConfigDump) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NetworkPoliciesConfigDump) ProtoMessage() {}
+
+func (x *NetworkPoliciesConfigDump) ProtoReflect() protoreflect.Message {
+ mi := &file_cilium_api_npds_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use NetworkPoliciesConfigDump.ProtoReflect.Descriptor instead.
+func (*NetworkPoliciesConfigDump) Descriptor() ([]byte, []int) {
+ return file_cilium_api_npds_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *NetworkPoliciesConfigDump) GetNetworkpolicies() []*NetworkPolicy {
+ if x != nil {
+ return x.Networkpolicies
+ }
+ return nil
+}
+
+var File_cilium_api_npds_proto protoreflect.FileDescriptor
+
+var file_cilium_api_npds_proto_rawDesc = []byte{
+ 0x0a, 0x15, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6e, 0x70, 0x64,
+ 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x1a,
+ 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f,
+ 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65,
+ 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x2f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2f, 0x76, 0x33, 0x2f, 0x64, 0x69,
+ 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f,
+ 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61,
+ 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbd, 0x02, 0x0a,
+ 0x0d, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x33,
+ 0x0a, 0x0c, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x69, 0x70, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x09, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x92, 0x01, 0x0a, 0x08, 0x01, 0x10, 0x02,
+ 0x22, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74,
+ 0x49, 0x70, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f,
+ 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69,
+ 0x6e, 0x74, 0x49, 0x64, 0x12, 0x54, 0x0a, 0x19, 0x69, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x5f,
+ 0x70, 0x65, 0x72, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65,
+ 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d,
+ 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69,
+ 0x63, 0x79, 0x52, 0x16, 0x69, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x50, 0x65, 0x72, 0x50, 0x6f,
+ 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x18, 0x65, 0x67,
+ 0x72, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x70, 0x6f,
+ 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63,
+ 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72,
+ 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x15, 0x65, 0x67, 0x72, 0x65, 0x73, 0x73, 0x50,
+ 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x2c,
+ 0x0a, 0x12, 0x63, 0x6f, 0x6e, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x5f, 0x6d, 0x61, 0x70, 0x5f,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6e, 0x6e,
+ 0x74, 0x72, 0x61, 0x63, 0x6b, 0x4d, 0x61, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xd7, 0x01, 0x0a,
+ 0x11, 0x50, 0x6f, 0x72, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69,
+ 0x63, 0x79, 0x12, 0x1d, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d,
+ 0x42, 0x09, 0xfa, 0x42, 0x06, 0x2a, 0x04, 0x18, 0xff, 0xff, 0x03, 0x52, 0x04, 0x70, 0x6f, 0x72,
+ 0x74, 0x12, 0x24, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x0d, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x2a, 0x04, 0x18, 0xff, 0xff, 0x03, 0x52, 0x07,
+ 0x65, 0x6e, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x48, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33,
+ 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f,
+ 0x6c, 0x12, 0x33, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x1d, 0x2e, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x4e, 0x65,
+ 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x75, 0x6c, 0x65, 0x52,
+ 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x22, 0x85, 0x02, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x43, 0x6f,
+ 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64,
+ 0x5f, 0x63, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x72, 0x75, 0x73, 0x74,
+ 0x65, 0x64, 0x43, 0x61, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63,
+ 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69,
+ 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b,
+ 0x65, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d,
+ 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72,
+ 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x41, 0x0a, 0x1d, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x73, 0x64, 0x73, 0x5f,
+ 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x76, 0x61,
+ 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x53,
+ 0x64, 0x73, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x6c, 0x73, 0x5f,
+ 0x73, 0x64, 0x73, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0c, 0x74, 0x6c, 0x73, 0x53, 0x64, 0x73, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x22, 0xab,
+ 0x04, 0x0a, 0x15, 0x50, 0x6f, 0x72, 0x74, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f,
+ 0x6c, 0x69, 0x63, 0x79, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x41, 0x0a, 0x1d,
+ 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x74,
+ 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x5f, 0x36, 0x34, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x04, 0x52, 0x1a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x52,
+ 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x36, 0x34, 0x12,
+ 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69,
+ 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65,
+ 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x48, 0x0a, 0x16, 0x64, 0x6f, 0x77, 0x6e,
+ 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65,
+ 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x69, 0x6c, 0x69, 0x75,
+ 0x6d, 0x2e, 0x54, 0x4c, 0x53, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, 0x14, 0x64, 0x6f,
+ 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65,
+ 0x78, 0x74, 0x12, 0x44, 0x0a, 0x14, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x74,
+ 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x12, 0x2e, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x54, 0x4c, 0x53, 0x43, 0x6f, 0x6e,
+ 0x74, 0x65, 0x78, 0x74, 0x52, 0x12, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x6c,
+ 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76,
+ 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b,
+ 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x6c,
+ 0x37, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c,
+ 0x37, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x3f, 0x0a, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x72,
+ 0x75, 0x6c, 0x65, 0x73, 0x18, 0x64, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x63, 0x69, 0x6c,
+ 0x69, 0x75, 0x6d, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50,
+ 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x09, 0x68, 0x74,
+ 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0b, 0x6b, 0x61, 0x66, 0x6b, 0x61,
+ 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x65, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x63,
+ 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x4b, 0x61, 0x66, 0x6b, 0x61, 0x4e, 0x65, 0x74, 0x77, 0x6f,
+ 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52,
+ 0x0a, 0x6b, 0x61, 0x66, 0x6b, 0x61, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x08, 0x6c,
+ 0x37, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x66, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
+ 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x4c, 0x37, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b,
+ 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x48, 0x00, 0x52, 0x07, 0x6c,
+ 0x37, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x42, 0x04, 0x0a, 0x02, 0x6c, 0x37, 0x22, 0x60, 0x0a, 0x16,
+ 0x48, 0x74, 0x74, 0x70, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63,
+ 0x79, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x46, 0x0a, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x72,
+ 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x63, 0x69, 0x6c,
+ 0x69, 0x75, 0x6d, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50,
+ 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x75, 0x6c, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01,
+ 0x02, 0x08, 0x01, 0x52, 0x09, 0x68, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x22, 0xd2,
+ 0x03, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x1b,
+ 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42,
+ 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x42, 0x0a, 0x0c, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d,
+ 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x4d, 0x61, 0x74,
+ 0x63, 0x68, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x41,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4b, 0x0a, 0x0f, 0x6d, 0x69, 0x73, 0x6d, 0x61, 0x74, 0x63,
+ 0x68, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22,
+ 0x2e, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61,
+ 0x74, 0x63, 0x68, 0x2e, 0x4d, 0x69, 0x73, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x41, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x73, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x41, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x73, 0x64, 0x73, 0x5f,
+ 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x53, 0x64, 0x73, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x22, 0x4c, 0x0a, 0x0b,
+ 0x4d, 0x61, 0x74, 0x63, 0x68, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x11, 0x43,
+ 0x4f, 0x4e, 0x54, 0x49, 0x4e, 0x55, 0x45, 0x5f, 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48,
+ 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x46, 0x41, 0x49, 0x4c, 0x5f, 0x4f, 0x4e, 0x5f, 0x4d, 0x41,
+ 0x54, 0x43, 0x48, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f,
+ 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x02, 0x22, 0x86, 0x01, 0x0a, 0x0e, 0x4d,
+ 0x69, 0x73, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a,
+ 0x10, 0x46, 0x41, 0x49, 0x4c, 0x5f, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43,
+ 0x48, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, 0x43, 0x4f, 0x4e, 0x54, 0x49, 0x4e, 0x55, 0x45, 0x5f,
+ 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x01, 0x12, 0x13, 0x0a,
+ 0x0f, 0x41, 0x44, 0x44, 0x5f, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48,
+ 0x10, 0x02, 0x12, 0x16, 0x0a, 0x12, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x4f, 0x4e, 0x5f,
+ 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x03, 0x12, 0x17, 0x0a, 0x13, 0x52, 0x45,
+ 0x50, 0x4c, 0x41, 0x43, 0x45, 0x5f, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43,
+ 0x48, 0x10, 0x04, 0x22, 0x93, 0x01, 0x0a, 0x15, 0x48, 0x74, 0x74, 0x70, 0x4e, 0x65, 0x74, 0x77,
+ 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x3e, 0x0a,
+ 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f,
+ 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x3a, 0x0a,
+ 0x0e, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x18,
+ 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x48,
+ 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x0d, 0x68, 0x65, 0x61, 0x64,
+ 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x22, 0x64, 0x0a, 0x17, 0x4b, 0x61, 0x66,
+ 0x6b, 0x61, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52,
+ 0x75, 0x6c, 0x65, 0x73, 0x12, 0x49, 0x0a, 0x0b, 0x6b, 0x61, 0x66, 0x6b, 0x61, 0x5f, 0x72, 0x75,
+ 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x63, 0x69, 0x6c, 0x69,
+ 0x75, 0x6d, 0x2e, 0x4b, 0x61, 0x66, 0x6b, 0x61, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50,
+ 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x75, 0x6c, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01,
+ 0x02, 0x08, 0x01, 0x52, 0x0a, 0x6b, 0x61, 0x66, 0x6b, 0x61, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x22,
+ 0xbe, 0x01, 0x0a, 0x16, 0x4b, 0x61, 0x66, 0x6b, 0x61, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b,
+ 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70,
+ 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52,
+ 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x61,
+ 0x70, 0x69, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x07, 0x61,
+ 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x35, 0x0a, 0x09, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
+ 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x18, 0xfa, 0x42, 0x15, 0x72, 0x13,
+ 0x32, 0x11, 0x5e, 0x5b, 0x61, 0x2d, 0x7a, 0x41, 0x2d, 0x5a, 0x30, 0x2d, 0x39, 0x2e, 0x5f, 0x2d,
+ 0x5d, 0x2a, 0x24, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x31, 0x0a,
+ 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x1b, 0xfa, 0x42,
+ 0x18, 0x72, 0x16, 0x18, 0xff, 0x01, 0x32, 0x11, 0x5e, 0x5b, 0x61, 0x2d, 0x7a, 0x41, 0x2d, 0x5a,
+ 0x30, 0x2d, 0x39, 0x2e, 0x5f, 0x2d, 0x5d, 0x2a, 0x24, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63,
+ 0x22, 0x9a, 0x01, 0x0a, 0x14, 0x4c, 0x37, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f,
+ 0x6c, 0x69, 0x63, 0x79, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x41, 0x0a, 0x0e, 0x6c, 0x37, 0x5f,
+ 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x4c, 0x37, 0x4e, 0x65, 0x74,
+ 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x0c,
+ 0x6c, 0x37, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x0d,
+ 0x6c, 0x37, 0x5f, 0x64, 0x65, 0x6e, 0x79, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x4c, 0x37, 0x4e,
+ 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x75, 0x6c, 0x65,
+ 0x52, 0x0b, 0x6c, 0x37, 0x44, 0x65, 0x6e, 0x79, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x22, 0xea, 0x01,
+ 0x0a, 0x13, 0x4c, 0x37, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63,
+ 0x79, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x04, 0x72, 0x75, 0x6c,
+ 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d,
+ 0x2e, 0x4c, 0x37, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79,
+ 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04,
+ 0x72, 0x75, 0x6c, 0x65, 0x12, 0x4b, 0x0a, 0x0d, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
+ 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x61, 0x74, 0x63,
+ 0x68, 0x65, 0x72, 0x52, 0x0c, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x75, 0x6c,
+ 0x65, 0x1a, 0x37, 0x0a, 0x09, 0x52, 0x75, 0x6c, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
+ 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79,
+ 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x5c, 0x0a, 0x19, 0x4e, 0x65,
+ 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x3f, 0x0a, 0x0f, 0x6e, 0x65, 0x74, 0x77, 0x6f,
+ 0x72, 0x6b, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x15, 0x2e, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72,
+ 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b,
+ 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x32, 0xda, 0x02, 0x0a, 0x1d, 0x4e, 0x65, 0x74,
+ 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76,
+ 0x65, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x7a, 0x0a, 0x15, 0x53, 0x74,
+ 0x72, 0x65, 0x61, 0x6d, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63,
+ 0x69, 0x65, 0x73, 0x12, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33,
+ 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x44,
+ 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x9e, 0x01, 0x0a, 0x14, 0x46, 0x65, 0x74, 0x63, 0x68,
+ 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12,
+ 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e,
+ 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x69, 0x73,
+ 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69,
+ 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f,
+ 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x29, 0x82, 0xd3,
+ 0xe4, 0x93, 0x02, 0x23, 0x22, 0x1e, 0x2f, 0x76, 0x33, 0x2f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76,
+ 0x65, 0x72, 0x79, 0x3a, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x70, 0x6f, 0x6c, 0x69,
+ 0x63, 0x69, 0x65, 0x73, 0x3a, 0x01, 0x2a, 0x1a, 0x1c, 0x8a, 0xa4, 0x96, 0xf3, 0x07, 0x16, 0x0a,
+ 0x14, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50,
+ 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x2e, 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x78, 0x79,
+ 0x2f, 0x67, 0x6f, 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x3b, 0x63,
+ 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_cilium_api_npds_proto_rawDescOnce sync.Once
+ file_cilium_api_npds_proto_rawDescData = file_cilium_api_npds_proto_rawDesc
+)
+
+func file_cilium_api_npds_proto_rawDescGZIP() []byte {
+ file_cilium_api_npds_proto_rawDescOnce.Do(func() {
+ file_cilium_api_npds_proto_rawDescData = protoimpl.X.CompressGZIP(file_cilium_api_npds_proto_rawDescData)
+ })
+ return file_cilium_api_npds_proto_rawDescData
+}
+
+var file_cilium_api_npds_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_cilium_api_npds_proto_msgTypes = make([]protoimpl.MessageInfo, 13)
+var file_cilium_api_npds_proto_goTypes = []interface{}{
+ (HeaderMatch_MatchAction)(0), // 0: cilium.HeaderMatch.MatchAction
+ (HeaderMatch_MismatchAction)(0), // 1: cilium.HeaderMatch.MismatchAction
+ (*NetworkPolicy)(nil), // 2: cilium.NetworkPolicy
+ (*PortNetworkPolicy)(nil), // 3: cilium.PortNetworkPolicy
+ (*TLSContext)(nil), // 4: cilium.TLSContext
+ (*PortNetworkPolicyRule)(nil), // 5: cilium.PortNetworkPolicyRule
+ (*HttpNetworkPolicyRules)(nil), // 6: cilium.HttpNetworkPolicyRules
+ (*HeaderMatch)(nil), // 7: cilium.HeaderMatch
+ (*HttpNetworkPolicyRule)(nil), // 8: cilium.HttpNetworkPolicyRule
+ (*KafkaNetworkPolicyRules)(nil), // 9: cilium.KafkaNetworkPolicyRules
+ (*KafkaNetworkPolicyRule)(nil), // 10: cilium.KafkaNetworkPolicyRule
+ (*L7NetworkPolicyRules)(nil), // 11: cilium.L7NetworkPolicyRules
+ (*L7NetworkPolicyRule)(nil), // 12: cilium.L7NetworkPolicyRule
+ (*NetworkPoliciesConfigDump)(nil), // 13: cilium.NetworkPoliciesConfigDump
+ nil, // 14: cilium.L7NetworkPolicyRule.RuleEntry
+ (v3.SocketAddress_Protocol)(0), // 15: envoy.config.core.v3.SocketAddress.Protocol
+ (*v31.HeaderMatcher)(nil), // 16: envoy.config.route.v3.HeaderMatcher
+ (*v32.MetadataMatcher)(nil), // 17: envoy.type.matcher.v3.MetadataMatcher
+ (*v33.DiscoveryRequest)(nil), // 18: envoy.service.discovery.v3.DiscoveryRequest
+ (*v33.DiscoveryResponse)(nil), // 19: envoy.service.discovery.v3.DiscoveryResponse
+}
+var file_cilium_api_npds_proto_depIdxs = []int32{
+ 3, // 0: cilium.NetworkPolicy.ingress_per_port_policies:type_name -> cilium.PortNetworkPolicy
+ 3, // 1: cilium.NetworkPolicy.egress_per_port_policies:type_name -> cilium.PortNetworkPolicy
+ 15, // 2: cilium.PortNetworkPolicy.protocol:type_name -> envoy.config.core.v3.SocketAddress.Protocol
+ 5, // 3: cilium.PortNetworkPolicy.rules:type_name -> cilium.PortNetworkPolicyRule
+ 4, // 4: cilium.PortNetworkPolicyRule.downstream_tls_context:type_name -> cilium.TLSContext
+ 4, // 5: cilium.PortNetworkPolicyRule.upstream_tls_context:type_name -> cilium.TLSContext
+ 6, // 6: cilium.PortNetworkPolicyRule.http_rules:type_name -> cilium.HttpNetworkPolicyRules
+ 9, // 7: cilium.PortNetworkPolicyRule.kafka_rules:type_name -> cilium.KafkaNetworkPolicyRules
+ 11, // 8: cilium.PortNetworkPolicyRule.l7_rules:type_name -> cilium.L7NetworkPolicyRules
+ 8, // 9: cilium.HttpNetworkPolicyRules.http_rules:type_name -> cilium.HttpNetworkPolicyRule
+ 0, // 10: cilium.HeaderMatch.match_action:type_name -> cilium.HeaderMatch.MatchAction
+ 1, // 11: cilium.HeaderMatch.mismatch_action:type_name -> cilium.HeaderMatch.MismatchAction
+ 16, // 12: cilium.HttpNetworkPolicyRule.headers:type_name -> envoy.config.route.v3.HeaderMatcher
+ 7, // 13: cilium.HttpNetworkPolicyRule.header_matches:type_name -> cilium.HeaderMatch
+ 10, // 14: cilium.KafkaNetworkPolicyRules.kafka_rules:type_name -> cilium.KafkaNetworkPolicyRule
+ 12, // 15: cilium.L7NetworkPolicyRules.l7_allow_rules:type_name -> cilium.L7NetworkPolicyRule
+ 12, // 16: cilium.L7NetworkPolicyRules.l7_deny_rules:type_name -> cilium.L7NetworkPolicyRule
+ 14, // 17: cilium.L7NetworkPolicyRule.rule:type_name -> cilium.L7NetworkPolicyRule.RuleEntry
+ 17, // 18: cilium.L7NetworkPolicyRule.metadata_rule:type_name -> envoy.type.matcher.v3.MetadataMatcher
+ 2, // 19: cilium.NetworkPoliciesConfigDump.networkpolicies:type_name -> cilium.NetworkPolicy
+ 18, // 20: cilium.NetworkPolicyDiscoveryService.StreamNetworkPolicies:input_type -> envoy.service.discovery.v3.DiscoveryRequest
+ 18, // 21: cilium.NetworkPolicyDiscoveryService.FetchNetworkPolicies:input_type -> envoy.service.discovery.v3.DiscoveryRequest
+ 19, // 22: cilium.NetworkPolicyDiscoveryService.StreamNetworkPolicies:output_type -> envoy.service.discovery.v3.DiscoveryResponse
+ 19, // 23: cilium.NetworkPolicyDiscoveryService.FetchNetworkPolicies:output_type -> envoy.service.discovery.v3.DiscoveryResponse
+ 22, // [22:24] is the sub-list for method output_type
+ 20, // [20:22] is the sub-list for method input_type
+ 20, // [20:20] is the sub-list for extension type_name
+ 20, // [20:20] is the sub-list for extension extendee
+ 0, // [0:20] is the sub-list for field type_name
+}
+
+func init() { file_cilium_api_npds_proto_init() }
+func file_cilium_api_npds_proto_init() {
+ if File_cilium_api_npds_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_cilium_api_npds_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NetworkPolicy); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cilium_api_npds_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PortNetworkPolicy); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cilium_api_npds_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TLSContext); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cilium_api_npds_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PortNetworkPolicyRule); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cilium_api_npds_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HttpNetworkPolicyRules); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cilium_api_npds_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HeaderMatch); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cilium_api_npds_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HttpNetworkPolicyRule); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cilium_api_npds_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*KafkaNetworkPolicyRules); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cilium_api_npds_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*KafkaNetworkPolicyRule); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cilium_api_npds_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*L7NetworkPolicyRules); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cilium_api_npds_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*L7NetworkPolicyRule); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cilium_api_npds_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NetworkPoliciesConfigDump); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_cilium_api_npds_proto_msgTypes[3].OneofWrappers = []interface{}{
+ (*PortNetworkPolicyRule_HttpRules)(nil),
+ (*PortNetworkPolicyRule_KafkaRules)(nil),
+ (*PortNetworkPolicyRule_L7Rules)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cilium_api_npds_proto_rawDesc,
+ NumEnums: 2,
+ NumMessages: 13,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_cilium_api_npds_proto_goTypes,
+ DependencyIndexes: file_cilium_api_npds_proto_depIdxs,
+ EnumInfos: file_cilium_api_npds_proto_enumTypes,
+ MessageInfos: file_cilium_api_npds_proto_msgTypes,
+ }.Build()
+ File_cilium_api_npds_proto = out.File
+ file_cilium_api_npds_proto_rawDesc = nil
+ file_cilium_api_npds_proto_goTypes = nil
+ file_cilium_api_npds_proto_depIdxs = nil
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConnInterface
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion6
+
+// NetworkPolicyDiscoveryServiceClient is the client API for NetworkPolicyDiscoveryService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type NetworkPolicyDiscoveryServiceClient interface {
+ StreamNetworkPolicies(ctx context.Context, opts ...grpc.CallOption) (NetworkPolicyDiscoveryService_StreamNetworkPoliciesClient, error)
+ FetchNetworkPolicies(ctx context.Context, in *v33.DiscoveryRequest, opts ...grpc.CallOption) (*v33.DiscoveryResponse, error)
+}
+
+type networkPolicyDiscoveryServiceClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewNetworkPolicyDiscoveryServiceClient(cc grpc.ClientConnInterface) NetworkPolicyDiscoveryServiceClient {
+ return &networkPolicyDiscoveryServiceClient{cc}
+}
+
+func (c *networkPolicyDiscoveryServiceClient) StreamNetworkPolicies(ctx context.Context, opts ...grpc.CallOption) (NetworkPolicyDiscoveryService_StreamNetworkPoliciesClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_NetworkPolicyDiscoveryService_serviceDesc.Streams[0], "/cilium.NetworkPolicyDiscoveryService/StreamNetworkPolicies", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &networkPolicyDiscoveryServiceStreamNetworkPoliciesClient{stream}
+ return x, nil
+}
+
+type NetworkPolicyDiscoveryService_StreamNetworkPoliciesClient interface {
+ Send(*v33.DiscoveryRequest) error
+ Recv() (*v33.DiscoveryResponse, error)
+ grpc.ClientStream
+}
+
+type networkPolicyDiscoveryServiceStreamNetworkPoliciesClient struct {
+ grpc.ClientStream
+}
+
+func (x *networkPolicyDiscoveryServiceStreamNetworkPoliciesClient) Send(m *v33.DiscoveryRequest) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *networkPolicyDiscoveryServiceStreamNetworkPoliciesClient) Recv() (*v33.DiscoveryResponse, error) {
+ m := new(v33.DiscoveryResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *networkPolicyDiscoveryServiceClient) FetchNetworkPolicies(ctx context.Context, in *v33.DiscoveryRequest, opts ...grpc.CallOption) (*v33.DiscoveryResponse, error) {
+ out := new(v33.DiscoveryResponse)
+ err := c.cc.Invoke(ctx, "/cilium.NetworkPolicyDiscoveryService/FetchNetworkPolicies", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// NetworkPolicyDiscoveryServiceServer is the server API for NetworkPolicyDiscoveryService service.
+type NetworkPolicyDiscoveryServiceServer interface {
+ StreamNetworkPolicies(NetworkPolicyDiscoveryService_StreamNetworkPoliciesServer) error
+ FetchNetworkPolicies(context.Context, *v33.DiscoveryRequest) (*v33.DiscoveryResponse, error)
+}
+
+// UnimplementedNetworkPolicyDiscoveryServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedNetworkPolicyDiscoveryServiceServer struct {
+}
+
+func (*UnimplementedNetworkPolicyDiscoveryServiceServer) StreamNetworkPolicies(NetworkPolicyDiscoveryService_StreamNetworkPoliciesServer) error {
+ return status.Errorf(codes.Unimplemented, "method StreamNetworkPolicies not implemented")
+}
+func (*UnimplementedNetworkPolicyDiscoveryServiceServer) FetchNetworkPolicies(context.Context, *v33.DiscoveryRequest) (*v33.DiscoveryResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method FetchNetworkPolicies not implemented")
+}
+
+func RegisterNetworkPolicyDiscoveryServiceServer(s *grpc.Server, srv NetworkPolicyDiscoveryServiceServer) {
+ s.RegisterService(&_NetworkPolicyDiscoveryService_serviceDesc, srv)
+}
+
+func _NetworkPolicyDiscoveryService_StreamNetworkPolicies_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(NetworkPolicyDiscoveryServiceServer).StreamNetworkPolicies(&networkPolicyDiscoveryServiceStreamNetworkPoliciesServer{stream})
+}
+
+type NetworkPolicyDiscoveryService_StreamNetworkPoliciesServer interface {
+ Send(*v33.DiscoveryResponse) error
+ Recv() (*v33.DiscoveryRequest, error)
+ grpc.ServerStream
+}
+
+type networkPolicyDiscoveryServiceStreamNetworkPoliciesServer struct {
+ grpc.ServerStream
+}
+
+func (x *networkPolicyDiscoveryServiceStreamNetworkPoliciesServer) Send(m *v33.DiscoveryResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *networkPolicyDiscoveryServiceStreamNetworkPoliciesServer) Recv() (*v33.DiscoveryRequest, error) {
+ m := new(v33.DiscoveryRequest)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func _NetworkPolicyDiscoveryService_FetchNetworkPolicies_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(v33.DiscoveryRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NetworkPolicyDiscoveryServiceServer).FetchNetworkPolicies(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/cilium.NetworkPolicyDiscoveryService/FetchNetworkPolicies",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NetworkPolicyDiscoveryServiceServer).FetchNetworkPolicies(ctx, req.(*v33.DiscoveryRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _NetworkPolicyDiscoveryService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "cilium.NetworkPolicyDiscoveryService",
+ HandlerType: (*NetworkPolicyDiscoveryServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "FetchNetworkPolicies",
+ Handler: _NetworkPolicyDiscoveryService_FetchNetworkPolicies_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "StreamNetworkPolicies",
+ Handler: _NetworkPolicyDiscoveryService_StreamNetworkPolicies_Handler,
+ ServerStreams: true,
+ ClientStreams: true,
+ },
+ },
+ Metadata: "cilium/api/npds.proto",
+}
diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/npds.pb.validate.go b/vendor/github.com/cilium/proxy/go/cilium/api/npds.pb.validate.go
new file mode 100644
index 000000000..19ddf8aaa
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/cilium/api/npds.pb.validate.go
@@ -0,0 +1,1969 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: cilium/api/npds.proto
+
+package cilium
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+
+ v3 "github.com/cilium/proxy/go/envoy/config/core/v3"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+
+ _ = v3.SocketAddress_Protocol(0)
+)
+
+// Validate checks the field values on NetworkPolicy with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *NetworkPolicy) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on NetworkPolicy with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in NetworkPolicyMultiError, or
+// nil if none found.
+func (m *NetworkPolicy) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *NetworkPolicy) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if l := len(m.GetEndpointIps()); l < 1 || l > 2 {
+ err := NetworkPolicyValidationError{
+ field: "EndpointIps",
+ reason: "value must contain between 1 and 2 items, inclusive",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ for idx, item := range m.GetEndpointIps() {
+ _, _ = idx, item
+
+ if utf8.RuneCountInString(item) < 1 {
+ err := NetworkPolicyValidationError{
+ field: fmt.Sprintf("EndpointIps[%v]", idx),
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+
+ // no validation rules for EndpointId
+
+ for idx, item := range m.GetIngressPerPortPolicies() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, NetworkPolicyValidationError{
+ field: fmt.Sprintf("IngressPerPortPolicies[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, NetworkPolicyValidationError{
+ field: fmt.Sprintf("IngressPerPortPolicies[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return NetworkPolicyValidationError{
+ field: fmt.Sprintf("IngressPerPortPolicies[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetEgressPerPortPolicies() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, NetworkPolicyValidationError{
+ field: fmt.Sprintf("EgressPerPortPolicies[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, NetworkPolicyValidationError{
+ field: fmt.Sprintf("EgressPerPortPolicies[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return NetworkPolicyValidationError{
+ field: fmt.Sprintf("EgressPerPortPolicies[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ // no validation rules for ConntrackMapName
+
+ if len(errors) > 0 {
+ return NetworkPolicyMultiError(errors)
+ }
+
+ return nil
+}
+
+// NetworkPolicyMultiError is an error wrapping multiple validation errors
+// returned by NetworkPolicy.ValidateAll() if the designated constraints
+// aren't met.
+type NetworkPolicyMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m NetworkPolicyMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m NetworkPolicyMultiError) AllErrors() []error { return m }
+
+// NetworkPolicyValidationError is the validation error returned by
+// NetworkPolicy.Validate if the designated constraints aren't met.
+type NetworkPolicyValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e NetworkPolicyValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e NetworkPolicyValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e NetworkPolicyValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e NetworkPolicyValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e NetworkPolicyValidationError) ErrorName() string { return "NetworkPolicyValidationError" }
+
+// Error satisfies the builtin error interface
+func (e NetworkPolicyValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sNetworkPolicy.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = NetworkPolicyValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = NetworkPolicyValidationError{}
+
+// Validate checks the field values on PortNetworkPolicy with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *PortNetworkPolicy) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on PortNetworkPolicy with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// PortNetworkPolicyMultiError, or nil if none found.
+func (m *PortNetworkPolicy) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *PortNetworkPolicy) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if m.GetPort() > 65535 {
+ err := PortNetworkPolicyValidationError{
+ field: "Port",
+ reason: "value must be less than or equal to 65535",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if m.GetEndPort() > 65535 {
+ err := PortNetworkPolicyValidationError{
+ field: "EndPort",
+ reason: "value must be less than or equal to 65535",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ // no validation rules for Protocol
+
+ for idx, item := range m.GetRules() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, PortNetworkPolicyValidationError{
+ field: fmt.Sprintf("Rules[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, PortNetworkPolicyValidationError{
+ field: fmt.Sprintf("Rules[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return PortNetworkPolicyValidationError{
+ field: fmt.Sprintf("Rules[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return PortNetworkPolicyMultiError(errors)
+ }
+
+ return nil
+}
+
+// PortNetworkPolicyMultiError is an error wrapping multiple validation errors
+// returned by PortNetworkPolicy.ValidateAll() if the designated constraints
+// aren't met.
+type PortNetworkPolicyMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m PortNetworkPolicyMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m PortNetworkPolicyMultiError) AllErrors() []error { return m }
+
+// PortNetworkPolicyValidationError is the validation error returned by
+// PortNetworkPolicy.Validate if the designated constraints aren't met.
+type PortNetworkPolicyValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e PortNetworkPolicyValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e PortNetworkPolicyValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e PortNetworkPolicyValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e PortNetworkPolicyValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e PortNetworkPolicyValidationError) ErrorName() string {
+ return "PortNetworkPolicyValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e PortNetworkPolicyValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sPortNetworkPolicy.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = PortNetworkPolicyValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = PortNetworkPolicyValidationError{}
+
+// Validate checks the field values on TLSContext with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *TLSContext) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on TLSContext with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in TLSContextMultiError, or
+// nil if none found.
+func (m *TLSContext) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *TLSContext) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for TrustedCa
+
+ // no validation rules for CertificateChain
+
+ // no validation rules for PrivateKey
+
+ // no validation rules for ValidationContextSdsSecret
+
+ // no validation rules for TlsSdsSecret
+
+ if len(errors) > 0 {
+ return TLSContextMultiError(errors)
+ }
+
+ return nil
+}
+
+// TLSContextMultiError is an error wrapping multiple validation errors
+// returned by TLSContext.ValidateAll() if the designated constraints aren't met.
+type TLSContextMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m TLSContextMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m TLSContextMultiError) AllErrors() []error { return m }
+
+// TLSContextValidationError is the validation error returned by
+// TLSContext.Validate if the designated constraints aren't met.
+type TLSContextValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e TLSContextValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e TLSContextValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e TLSContextValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e TLSContextValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e TLSContextValidationError) ErrorName() string { return "TLSContextValidationError" }
+
+// Error satisfies the builtin error interface
+func (e TLSContextValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sTLSContext.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = TLSContextValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = TLSContextValidationError{}
+
+// Validate checks the field values on PortNetworkPolicyRule with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *PortNetworkPolicyRule) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on PortNetworkPolicyRule with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// PortNetworkPolicyRuleMultiError, or nil if none found.
+func (m *PortNetworkPolicyRule) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *PortNetworkPolicyRule) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Name
+
+ if all {
+ switch v := interface{}(m.GetDownstreamTlsContext()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, PortNetworkPolicyRuleValidationError{
+ field: "DownstreamTlsContext",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, PortNetworkPolicyRuleValidationError{
+ field: "DownstreamTlsContext",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetDownstreamTlsContext()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return PortNetworkPolicyRuleValidationError{
+ field: "DownstreamTlsContext",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetUpstreamTlsContext()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, PortNetworkPolicyRuleValidationError{
+ field: "UpstreamTlsContext",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, PortNetworkPolicyRuleValidationError{
+ field: "UpstreamTlsContext",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetUpstreamTlsContext()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return PortNetworkPolicyRuleValidationError{
+ field: "UpstreamTlsContext",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for L7Proto
+
+ switch v := m.L7.(type) {
+ case *PortNetworkPolicyRule_HttpRules:
+ if v == nil {
+ err := PortNetworkPolicyRuleValidationError{
+ field: "L7",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetHttpRules()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, PortNetworkPolicyRuleValidationError{
+ field: "HttpRules",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, PortNetworkPolicyRuleValidationError{
+ field: "HttpRules",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetHttpRules()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return PortNetworkPolicyRuleValidationError{
+ field: "HttpRules",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *PortNetworkPolicyRule_KafkaRules:
+ if v == nil {
+ err := PortNetworkPolicyRuleValidationError{
+ field: "L7",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetKafkaRules()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, PortNetworkPolicyRuleValidationError{
+ field: "KafkaRules",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, PortNetworkPolicyRuleValidationError{
+ field: "KafkaRules",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetKafkaRules()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return PortNetworkPolicyRuleValidationError{
+ field: "KafkaRules",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *PortNetworkPolicyRule_L7Rules:
+ if v == nil {
+ err := PortNetworkPolicyRuleValidationError{
+ field: "L7",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetL7Rules()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, PortNetworkPolicyRuleValidationError{
+ field: "L7Rules",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, PortNetworkPolicyRuleValidationError{
+ field: "L7Rules",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetL7Rules()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return PortNetworkPolicyRuleValidationError{
+ field: "L7Rules",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+
+ if len(errors) > 0 {
+ return PortNetworkPolicyRuleMultiError(errors)
+ }
+
+ return nil
+}
+
+// PortNetworkPolicyRuleMultiError is an error wrapping multiple validation
+// errors returned by PortNetworkPolicyRule.ValidateAll() if the designated
+// constraints aren't met.
+type PortNetworkPolicyRuleMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m PortNetworkPolicyRuleMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m PortNetworkPolicyRuleMultiError) AllErrors() []error { return m }
+
+// PortNetworkPolicyRuleValidationError is the validation error returned by
+// PortNetworkPolicyRule.Validate if the designated constraints aren't met.
+type PortNetworkPolicyRuleValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e PortNetworkPolicyRuleValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e PortNetworkPolicyRuleValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e PortNetworkPolicyRuleValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e PortNetworkPolicyRuleValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e PortNetworkPolicyRuleValidationError) ErrorName() string {
+ return "PortNetworkPolicyRuleValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e PortNetworkPolicyRuleValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sPortNetworkPolicyRule.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = PortNetworkPolicyRuleValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = PortNetworkPolicyRuleValidationError{}
+
+// Validate checks the field values on HttpNetworkPolicyRules with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *HttpNetworkPolicyRules) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on HttpNetworkPolicyRules with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// HttpNetworkPolicyRulesMultiError, or nil if none found.
+func (m *HttpNetworkPolicyRules) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *HttpNetworkPolicyRules) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(m.GetHttpRules()) < 1 {
+ err := HttpNetworkPolicyRulesValidationError{
+ field: "HttpRules",
+ reason: "value must contain at least 1 item(s)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ for idx, item := range m.GetHttpRules() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HttpNetworkPolicyRulesValidationError{
+ field: fmt.Sprintf("HttpRules[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HttpNetworkPolicyRulesValidationError{
+ field: fmt.Sprintf("HttpRules[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HttpNetworkPolicyRulesValidationError{
+ field: fmt.Sprintf("HttpRules[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return HttpNetworkPolicyRulesMultiError(errors)
+ }
+
+ return nil
+}
+
+// HttpNetworkPolicyRulesMultiError is an error wrapping multiple validation
+// errors returned by HttpNetworkPolicyRules.ValidateAll() if the designated
+// constraints aren't met.
+type HttpNetworkPolicyRulesMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m HttpNetworkPolicyRulesMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m HttpNetworkPolicyRulesMultiError) AllErrors() []error { return m }
+
+// HttpNetworkPolicyRulesValidationError is the validation error returned by
+// HttpNetworkPolicyRules.Validate if the designated constraints aren't met.
+type HttpNetworkPolicyRulesValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HttpNetworkPolicyRulesValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HttpNetworkPolicyRulesValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HttpNetworkPolicyRulesValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HttpNetworkPolicyRulesValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HttpNetworkPolicyRulesValidationError) ErrorName() string {
+ return "HttpNetworkPolicyRulesValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e HttpNetworkPolicyRulesValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHttpNetworkPolicyRules.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HttpNetworkPolicyRulesValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HttpNetworkPolicyRulesValidationError{}
+
+// Validate checks the field values on HeaderMatch with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *HeaderMatch) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on HeaderMatch with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in HeaderMatchMultiError, or
+// nil if none found.
+func (m *HeaderMatch) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *HeaderMatch) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if utf8.RuneCountInString(m.GetName()) < 1 {
+ err := HeaderMatchValidationError{
+ field: "Name",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ // no validation rules for Value
+
+ // no validation rules for MatchAction
+
+ // no validation rules for MismatchAction
+
+ // no validation rules for ValueSdsSecret
+
+ if len(errors) > 0 {
+ return HeaderMatchMultiError(errors)
+ }
+
+ return nil
+}
+
+// HeaderMatchMultiError is an error wrapping multiple validation errors
+// returned by HeaderMatch.ValidateAll() if the designated constraints aren't met.
+type HeaderMatchMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m HeaderMatchMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m HeaderMatchMultiError) AllErrors() []error { return m }
+
+// HeaderMatchValidationError is the validation error returned by
+// HeaderMatch.Validate if the designated constraints aren't met.
+type HeaderMatchValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HeaderMatchValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HeaderMatchValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HeaderMatchValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HeaderMatchValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HeaderMatchValidationError) ErrorName() string { return "HeaderMatchValidationError" }
+
+// Error satisfies the builtin error interface
+func (e HeaderMatchValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHeaderMatch.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HeaderMatchValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HeaderMatchValidationError{}
+
+// Validate checks the field values on HttpNetworkPolicyRule with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *HttpNetworkPolicyRule) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on HttpNetworkPolicyRule with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// HttpNetworkPolicyRuleMultiError, or nil if none found.
+func (m *HttpNetworkPolicyRule) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *HttpNetworkPolicyRule) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetHeaders() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HttpNetworkPolicyRuleValidationError{
+ field: fmt.Sprintf("Headers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HttpNetworkPolicyRuleValidationError{
+ field: fmt.Sprintf("Headers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HttpNetworkPolicyRuleValidationError{
+ field: fmt.Sprintf("Headers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetHeaderMatches() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HttpNetworkPolicyRuleValidationError{
+ field: fmt.Sprintf("HeaderMatches[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HttpNetworkPolicyRuleValidationError{
+ field: fmt.Sprintf("HeaderMatches[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HttpNetworkPolicyRuleValidationError{
+ field: fmt.Sprintf("HeaderMatches[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return HttpNetworkPolicyRuleMultiError(errors)
+ }
+
+ return nil
+}
+
+// HttpNetworkPolicyRuleMultiError is an error wrapping multiple validation
+// errors returned by HttpNetworkPolicyRule.ValidateAll() if the designated
+// constraints aren't met.
+type HttpNetworkPolicyRuleMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m HttpNetworkPolicyRuleMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m HttpNetworkPolicyRuleMultiError) AllErrors() []error { return m }
+
+// HttpNetworkPolicyRuleValidationError is the validation error returned by
+// HttpNetworkPolicyRule.Validate if the designated constraints aren't met.
+type HttpNetworkPolicyRuleValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HttpNetworkPolicyRuleValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HttpNetworkPolicyRuleValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HttpNetworkPolicyRuleValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HttpNetworkPolicyRuleValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HttpNetworkPolicyRuleValidationError) ErrorName() string {
+ return "HttpNetworkPolicyRuleValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e HttpNetworkPolicyRuleValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHttpNetworkPolicyRule.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HttpNetworkPolicyRuleValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HttpNetworkPolicyRuleValidationError{}
+
+// Validate checks the field values on KafkaNetworkPolicyRules with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *KafkaNetworkPolicyRules) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on KafkaNetworkPolicyRules with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// KafkaNetworkPolicyRulesMultiError, or nil if none found.
+func (m *KafkaNetworkPolicyRules) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *KafkaNetworkPolicyRules) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(m.GetKafkaRules()) < 1 {
+ err := KafkaNetworkPolicyRulesValidationError{
+ field: "KafkaRules",
+ reason: "value must contain at least 1 item(s)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ for idx, item := range m.GetKafkaRules() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, KafkaNetworkPolicyRulesValidationError{
+ field: fmt.Sprintf("KafkaRules[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, KafkaNetworkPolicyRulesValidationError{
+ field: fmt.Sprintf("KafkaRules[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return KafkaNetworkPolicyRulesValidationError{
+ field: fmt.Sprintf("KafkaRules[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return KafkaNetworkPolicyRulesMultiError(errors)
+ }
+
+ return nil
+}
+
+// KafkaNetworkPolicyRulesMultiError is an error wrapping multiple validation
+// errors returned by KafkaNetworkPolicyRules.ValidateAll() if the designated
+// constraints aren't met.
+type KafkaNetworkPolicyRulesMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m KafkaNetworkPolicyRulesMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m KafkaNetworkPolicyRulesMultiError) AllErrors() []error { return m }
+
+// KafkaNetworkPolicyRulesValidationError is the validation error returned by
+// KafkaNetworkPolicyRules.Validate if the designated constraints aren't met.
+type KafkaNetworkPolicyRulesValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e KafkaNetworkPolicyRulesValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e KafkaNetworkPolicyRulesValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e KafkaNetworkPolicyRulesValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e KafkaNetworkPolicyRulesValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e KafkaNetworkPolicyRulesValidationError) ErrorName() string {
+ return "KafkaNetworkPolicyRulesValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e KafkaNetworkPolicyRulesValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sKafkaNetworkPolicyRules.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = KafkaNetworkPolicyRulesValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = KafkaNetworkPolicyRulesValidationError{}
+
+// Validate checks the field values on KafkaNetworkPolicyRule with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *KafkaNetworkPolicyRule) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on KafkaNetworkPolicyRule with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// KafkaNetworkPolicyRuleMultiError, or nil if none found.
+func (m *KafkaNetworkPolicyRule) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *KafkaNetworkPolicyRule) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for ApiVersion
+
+ if !_KafkaNetworkPolicyRule_ClientId_Pattern.MatchString(m.GetClientId()) {
+ err := KafkaNetworkPolicyRuleValidationError{
+ field: "ClientId",
+ reason: "value does not match regex pattern \"^[a-zA-Z0-9._-]*$\"",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if utf8.RuneCountInString(m.GetTopic()) > 255 {
+ err := KafkaNetworkPolicyRuleValidationError{
+ field: "Topic",
+ reason: "value length must be at most 255 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if !_KafkaNetworkPolicyRule_Topic_Pattern.MatchString(m.GetTopic()) {
+ err := KafkaNetworkPolicyRuleValidationError{
+ field: "Topic",
+ reason: "value does not match regex pattern \"^[a-zA-Z0-9._-]*$\"",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return KafkaNetworkPolicyRuleMultiError(errors)
+ }
+
+ return nil
+}
+
+// KafkaNetworkPolicyRuleMultiError is an error wrapping multiple validation
+// errors returned by KafkaNetworkPolicyRule.ValidateAll() if the designated
+// constraints aren't met.
+type KafkaNetworkPolicyRuleMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m KafkaNetworkPolicyRuleMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m KafkaNetworkPolicyRuleMultiError) AllErrors() []error { return m }
+
+// KafkaNetworkPolicyRuleValidationError is the validation error returned by
+// KafkaNetworkPolicyRule.Validate if the designated constraints aren't met.
+type KafkaNetworkPolicyRuleValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e KafkaNetworkPolicyRuleValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e KafkaNetworkPolicyRuleValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e KafkaNetworkPolicyRuleValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e KafkaNetworkPolicyRuleValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e KafkaNetworkPolicyRuleValidationError) ErrorName() string {
+ return "KafkaNetworkPolicyRuleValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e KafkaNetworkPolicyRuleValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sKafkaNetworkPolicyRule.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = KafkaNetworkPolicyRuleValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = KafkaNetworkPolicyRuleValidationError{}
+
+var _KafkaNetworkPolicyRule_ClientId_Pattern = regexp.MustCompile("^[a-zA-Z0-9._-]*$")
+
+var _KafkaNetworkPolicyRule_Topic_Pattern = regexp.MustCompile("^[a-zA-Z0-9._-]*$")
+
+// Validate checks the field values on L7NetworkPolicyRules with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *L7NetworkPolicyRules) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on L7NetworkPolicyRules with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// L7NetworkPolicyRulesMultiError, or nil if none found.
+func (m *L7NetworkPolicyRules) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *L7NetworkPolicyRules) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetL7AllowRules() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, L7NetworkPolicyRulesValidationError{
+ field: fmt.Sprintf("L7AllowRules[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, L7NetworkPolicyRulesValidationError{
+ field: fmt.Sprintf("L7AllowRules[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return L7NetworkPolicyRulesValidationError{
+ field: fmt.Sprintf("L7AllowRules[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetL7DenyRules() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, L7NetworkPolicyRulesValidationError{
+ field: fmt.Sprintf("L7DenyRules[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, L7NetworkPolicyRulesValidationError{
+ field: fmt.Sprintf("L7DenyRules[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return L7NetworkPolicyRulesValidationError{
+ field: fmt.Sprintf("L7DenyRules[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return L7NetworkPolicyRulesMultiError(errors)
+ }
+
+ return nil
+}
+
+// L7NetworkPolicyRulesMultiError is an error wrapping multiple validation
+// errors returned by L7NetworkPolicyRules.ValidateAll() if the designated
+// constraints aren't met.
+type L7NetworkPolicyRulesMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m L7NetworkPolicyRulesMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m L7NetworkPolicyRulesMultiError) AllErrors() []error { return m }
+
+// L7NetworkPolicyRulesValidationError is the validation error returned by
+// L7NetworkPolicyRules.Validate if the designated constraints aren't met.
+type L7NetworkPolicyRulesValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e L7NetworkPolicyRulesValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e L7NetworkPolicyRulesValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e L7NetworkPolicyRulesValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e L7NetworkPolicyRulesValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e L7NetworkPolicyRulesValidationError) ErrorName() string {
+ return "L7NetworkPolicyRulesValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e L7NetworkPolicyRulesValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sL7NetworkPolicyRules.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = L7NetworkPolicyRulesValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = L7NetworkPolicyRulesValidationError{}
+
+// Validate checks the field values on L7NetworkPolicyRule with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *L7NetworkPolicyRule) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on L7NetworkPolicyRule with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// L7NetworkPolicyRuleMultiError, or nil if none found.
+func (m *L7NetworkPolicyRule) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *L7NetworkPolicyRule) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Name
+
+ // no validation rules for Rule
+
+ for idx, item := range m.GetMetadataRule() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, L7NetworkPolicyRuleValidationError{
+ field: fmt.Sprintf("MetadataRule[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, L7NetworkPolicyRuleValidationError{
+ field: fmt.Sprintf("MetadataRule[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return L7NetworkPolicyRuleValidationError{
+ field: fmt.Sprintf("MetadataRule[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return L7NetworkPolicyRuleMultiError(errors)
+ }
+
+ return nil
+}
+
+// L7NetworkPolicyRuleMultiError is an error wrapping multiple validation
+// errors returned by L7NetworkPolicyRule.ValidateAll() if the designated
+// constraints aren't met.
+type L7NetworkPolicyRuleMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m L7NetworkPolicyRuleMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m L7NetworkPolicyRuleMultiError) AllErrors() []error { return m }
+
+// L7NetworkPolicyRuleValidationError is the validation error returned by
+// L7NetworkPolicyRule.Validate if the designated constraints aren't met.
+type L7NetworkPolicyRuleValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e L7NetworkPolicyRuleValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e L7NetworkPolicyRuleValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e L7NetworkPolicyRuleValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e L7NetworkPolicyRuleValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e L7NetworkPolicyRuleValidationError) ErrorName() string {
+ return "L7NetworkPolicyRuleValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e L7NetworkPolicyRuleValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sL7NetworkPolicyRule.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = L7NetworkPolicyRuleValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = L7NetworkPolicyRuleValidationError{}
+
+// Validate checks the field values on NetworkPoliciesConfigDump with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *NetworkPoliciesConfigDump) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on NetworkPoliciesConfigDump with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// NetworkPoliciesConfigDumpMultiError, or nil if none found.
+func (m *NetworkPoliciesConfigDump) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *NetworkPoliciesConfigDump) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetNetworkpolicies() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, NetworkPoliciesConfigDumpValidationError{
+ field: fmt.Sprintf("Networkpolicies[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, NetworkPoliciesConfigDumpValidationError{
+ field: fmt.Sprintf("Networkpolicies[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return NetworkPoliciesConfigDumpValidationError{
+ field: fmt.Sprintf("Networkpolicies[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return NetworkPoliciesConfigDumpMultiError(errors)
+ }
+
+ return nil
+}
+
+// NetworkPoliciesConfigDumpMultiError is an error wrapping multiple validation
+// errors returned by NetworkPoliciesConfigDump.ValidateAll() if the
+// designated constraints aren't met.
+type NetworkPoliciesConfigDumpMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m NetworkPoliciesConfigDumpMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m NetworkPoliciesConfigDumpMultiError) AllErrors() []error { return m }
+
+// NetworkPoliciesConfigDumpValidationError is the validation error returned by
+// NetworkPoliciesConfigDump.Validate if the designated constraints aren't met.
+type NetworkPoliciesConfigDumpValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e NetworkPoliciesConfigDumpValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e NetworkPoliciesConfigDumpValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e NetworkPoliciesConfigDumpValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e NetworkPoliciesConfigDumpValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e NetworkPoliciesConfigDumpValidationError) ErrorName() string {
+ return "NetworkPoliciesConfigDumpValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e NetworkPoliciesConfigDumpValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sNetworkPoliciesConfigDump.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = NetworkPoliciesConfigDumpValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = NetworkPoliciesConfigDumpValidationError{}
diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/nphds.pb.go b/vendor/github.com/cilium/proxy/go/cilium/api/nphds.pb.go
new file mode 100644
index 000000000..0f3b9c65e
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/cilium/api/nphds.pb.go
@@ -0,0 +1,362 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.23.0
+// protoc v4.23.1
+// source: cilium/api/nphds.proto
+
+package cilium
+
+import (
+ context "context"
+ _ "github.com/cilium/proxy/go/envoy/annotations"
+ v3 "github.com/cilium/proxy/go/envoy/service/discovery/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// The mapping of a network policy identifier to the IP addresses of all the
+// hosts on which the network policy is enforced.
+// A host may be associated only with one network policy.
+type NetworkPolicyHosts struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The unique identifier of the network policy enforced on the hosts.
+ Policy uint64 `protobuf:"varint,1,opt,name=policy,proto3" json:"policy,omitempty"`
+ // The set of IP addresses of the hosts on which the network policy is
+ // enforced. Optional. May be empty.
+ HostAddresses []string `protobuf:"bytes,2,rep,name=host_addresses,json=hostAddresses,proto3" json:"host_addresses,omitempty"`
+}
+
+func (x *NetworkPolicyHosts) Reset() {
+ *x = NetworkPolicyHosts{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cilium_api_nphds_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *NetworkPolicyHosts) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NetworkPolicyHosts) ProtoMessage() {}
+
+func (x *NetworkPolicyHosts) ProtoReflect() protoreflect.Message {
+ mi := &file_cilium_api_nphds_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use NetworkPolicyHosts.ProtoReflect.Descriptor instead.
+func (*NetworkPolicyHosts) Descriptor() ([]byte, []int) {
+ return file_cilium_api_nphds_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *NetworkPolicyHosts) GetPolicy() uint64 {
+ if x != nil {
+ return x.Policy
+ }
+ return 0
+}
+
+func (x *NetworkPolicyHosts) GetHostAddresses() []string {
+ if x != nil {
+ return x.HostAddresses
+ }
+ return nil
+}
+
+var File_cilium_api_nphds_proto protoreflect.FileDescriptor
+
+var file_cilium_api_nphds_proto_rawDesc = []byte{
+ 0x0a, 0x16, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6e, 0x70, 0x68,
+ 0x64, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d,
+ 0x1a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f,
+ 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2f, 0x76, 0x33, 0x2f, 0x64, 0x69, 0x73,
+ 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x72, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61,
+ 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x69, 0x0a, 0x12, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b,
+ 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x70,
+ 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x70, 0x6f, 0x6c,
+ 0x69, 0x63, 0x79, 0x12, 0x3b, 0x0a, 0x0e, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72,
+ 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, 0x14, 0xfa, 0x42, 0x05,
+ 0x92, 0x01, 0x02, 0x18, 0x01, 0xfa, 0x42, 0x09, 0x92, 0x01, 0x06, 0x22, 0x04, 0x72, 0x02, 0x10,
+ 0x01, 0x52, 0x0d, 0x68, 0x6f, 0x73, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73,
+ 0x32, 0xee, 0x02, 0x0a, 0x22, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69,
+ 0x63, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x73, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79,
+ 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x7d, 0x0a, 0x18, 0x53, 0x74, 0x72, 0x65, 0x61,
+ 0x6d, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x48, 0x6f,
+ 0x73, 0x74, 0x73, 0x12, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33,
+ 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x44,
+ 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0xa5, 0x01, 0x0a, 0x17, 0x46, 0x65, 0x74, 0x63, 0x68,
+ 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x48, 0x6f, 0x73,
+ 0x74, 0x73, 0x12, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e,
+ 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x2e, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x69,
+ 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x2d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x27, 0x22, 0x22, 0x2f, 0x76, 0x32, 0x2f, 0x64, 0x69, 0x73,
+ 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x3a, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x70,
+ 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x73, 0x3a, 0x01, 0x2a, 0x1a, 0x21,
+ 0x8a, 0xa4, 0x96, 0xf3, 0x07, 0x1b, 0x0a, 0x19, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2e, 0x4e,
+ 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x48, 0x6f, 0x73, 0x74,
+ 0x73, 0x42, 0x2e, 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2f,
+ 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x3b, 0x63, 0x69, 0x6c, 0x69, 0x75,
+ 0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_cilium_api_nphds_proto_rawDescOnce sync.Once
+ file_cilium_api_nphds_proto_rawDescData = file_cilium_api_nphds_proto_rawDesc
+)
+
+func file_cilium_api_nphds_proto_rawDescGZIP() []byte {
+ file_cilium_api_nphds_proto_rawDescOnce.Do(func() {
+ file_cilium_api_nphds_proto_rawDescData = protoimpl.X.CompressGZIP(file_cilium_api_nphds_proto_rawDescData)
+ })
+ return file_cilium_api_nphds_proto_rawDescData
+}
+
+var file_cilium_api_nphds_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_cilium_api_nphds_proto_goTypes = []interface{}{
+ (*NetworkPolicyHosts)(nil), // 0: cilium.NetworkPolicyHosts
+ (*v3.DiscoveryRequest)(nil), // 1: envoy.service.discovery.v3.DiscoveryRequest
+ (*v3.DiscoveryResponse)(nil), // 2: envoy.service.discovery.v3.DiscoveryResponse
+}
+var file_cilium_api_nphds_proto_depIdxs = []int32{
+ 1, // 0: cilium.NetworkPolicyHostsDiscoveryService.StreamNetworkPolicyHosts:input_type -> envoy.service.discovery.v3.DiscoveryRequest
+ 1, // 1: cilium.NetworkPolicyHostsDiscoveryService.FetchNetworkPolicyHosts:input_type -> envoy.service.discovery.v3.DiscoveryRequest
+ 2, // 2: cilium.NetworkPolicyHostsDiscoveryService.StreamNetworkPolicyHosts:output_type -> envoy.service.discovery.v3.DiscoveryResponse
+ 2, // 3: cilium.NetworkPolicyHostsDiscoveryService.FetchNetworkPolicyHosts:output_type -> envoy.service.discovery.v3.DiscoveryResponse
+ 2, // [2:4] is the sub-list for method output_type
+ 0, // [0:2] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_cilium_api_nphds_proto_init() }
+func file_cilium_api_nphds_proto_init() {
+ if File_cilium_api_nphds_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_cilium_api_nphds_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NetworkPolicyHosts); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cilium_api_nphds_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_cilium_api_nphds_proto_goTypes,
+ DependencyIndexes: file_cilium_api_nphds_proto_depIdxs,
+ MessageInfos: file_cilium_api_nphds_proto_msgTypes,
+ }.Build()
+ File_cilium_api_nphds_proto = out.File
+ file_cilium_api_nphds_proto_rawDesc = nil
+ file_cilium_api_nphds_proto_goTypes = nil
+ file_cilium_api_nphds_proto_depIdxs = nil
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConnInterface
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion6
+
+// NetworkPolicyHostsDiscoveryServiceClient is the client API for NetworkPolicyHostsDiscoveryService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type NetworkPolicyHostsDiscoveryServiceClient interface {
+ StreamNetworkPolicyHosts(ctx context.Context, opts ...grpc.CallOption) (NetworkPolicyHostsDiscoveryService_StreamNetworkPolicyHostsClient, error)
+ FetchNetworkPolicyHosts(ctx context.Context, in *v3.DiscoveryRequest, opts ...grpc.CallOption) (*v3.DiscoveryResponse, error)
+}
+
+type networkPolicyHostsDiscoveryServiceClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewNetworkPolicyHostsDiscoveryServiceClient(cc grpc.ClientConnInterface) NetworkPolicyHostsDiscoveryServiceClient {
+ return &networkPolicyHostsDiscoveryServiceClient{cc}
+}
+
+func (c *networkPolicyHostsDiscoveryServiceClient) StreamNetworkPolicyHosts(ctx context.Context, opts ...grpc.CallOption) (NetworkPolicyHostsDiscoveryService_StreamNetworkPolicyHostsClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_NetworkPolicyHostsDiscoveryService_serviceDesc.Streams[0], "/cilium.NetworkPolicyHostsDiscoveryService/StreamNetworkPolicyHosts", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &networkPolicyHostsDiscoveryServiceStreamNetworkPolicyHostsClient{stream}
+ return x, nil
+}
+
+type NetworkPolicyHostsDiscoveryService_StreamNetworkPolicyHostsClient interface {
+ Send(*v3.DiscoveryRequest) error
+ Recv() (*v3.DiscoveryResponse, error)
+ grpc.ClientStream
+}
+
+type networkPolicyHostsDiscoveryServiceStreamNetworkPolicyHostsClient struct {
+ grpc.ClientStream
+}
+
+func (x *networkPolicyHostsDiscoveryServiceStreamNetworkPolicyHostsClient) Send(m *v3.DiscoveryRequest) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *networkPolicyHostsDiscoveryServiceStreamNetworkPolicyHostsClient) Recv() (*v3.DiscoveryResponse, error) {
+ m := new(v3.DiscoveryResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *networkPolicyHostsDiscoveryServiceClient) FetchNetworkPolicyHosts(ctx context.Context, in *v3.DiscoveryRequest, opts ...grpc.CallOption) (*v3.DiscoveryResponse, error) {
+ out := new(v3.DiscoveryResponse)
+ err := c.cc.Invoke(ctx, "/cilium.NetworkPolicyHostsDiscoveryService/FetchNetworkPolicyHosts", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// NetworkPolicyHostsDiscoveryServiceServer is the server API for NetworkPolicyHostsDiscoveryService service.
+type NetworkPolicyHostsDiscoveryServiceServer interface {
+ StreamNetworkPolicyHosts(NetworkPolicyHostsDiscoveryService_StreamNetworkPolicyHostsServer) error
+ FetchNetworkPolicyHosts(context.Context, *v3.DiscoveryRequest) (*v3.DiscoveryResponse, error)
+}
+
+// UnimplementedNetworkPolicyHostsDiscoveryServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedNetworkPolicyHostsDiscoveryServiceServer struct {
+}
+
+func (*UnimplementedNetworkPolicyHostsDiscoveryServiceServer) StreamNetworkPolicyHosts(NetworkPolicyHostsDiscoveryService_StreamNetworkPolicyHostsServer) error {
+ return status.Errorf(codes.Unimplemented, "method StreamNetworkPolicyHosts not implemented")
+}
+func (*UnimplementedNetworkPolicyHostsDiscoveryServiceServer) FetchNetworkPolicyHosts(context.Context, *v3.DiscoveryRequest) (*v3.DiscoveryResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method FetchNetworkPolicyHosts not implemented")
+}
+
+func RegisterNetworkPolicyHostsDiscoveryServiceServer(s *grpc.Server, srv NetworkPolicyHostsDiscoveryServiceServer) {
+ s.RegisterService(&_NetworkPolicyHostsDiscoveryService_serviceDesc, srv)
+}
+
+func _NetworkPolicyHostsDiscoveryService_StreamNetworkPolicyHosts_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(NetworkPolicyHostsDiscoveryServiceServer).StreamNetworkPolicyHosts(&networkPolicyHostsDiscoveryServiceStreamNetworkPolicyHostsServer{stream})
+}
+
+type NetworkPolicyHostsDiscoveryService_StreamNetworkPolicyHostsServer interface {
+ Send(*v3.DiscoveryResponse) error
+ Recv() (*v3.DiscoveryRequest, error)
+ grpc.ServerStream
+}
+
+type networkPolicyHostsDiscoveryServiceStreamNetworkPolicyHostsServer struct {
+ grpc.ServerStream
+}
+
+func (x *networkPolicyHostsDiscoveryServiceStreamNetworkPolicyHostsServer) Send(m *v3.DiscoveryResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *networkPolicyHostsDiscoveryServiceStreamNetworkPolicyHostsServer) Recv() (*v3.DiscoveryRequest, error) {
+ m := new(v3.DiscoveryRequest)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func _NetworkPolicyHostsDiscoveryService_FetchNetworkPolicyHosts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(v3.DiscoveryRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NetworkPolicyHostsDiscoveryServiceServer).FetchNetworkPolicyHosts(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/cilium.NetworkPolicyHostsDiscoveryService/FetchNetworkPolicyHosts",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NetworkPolicyHostsDiscoveryServiceServer).FetchNetworkPolicyHosts(ctx, req.(*v3.DiscoveryRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _NetworkPolicyHostsDiscoveryService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "cilium.NetworkPolicyHostsDiscoveryService",
+ HandlerType: (*NetworkPolicyHostsDiscoveryServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "FetchNetworkPolicyHosts",
+ Handler: _NetworkPolicyHostsDiscoveryService_FetchNetworkPolicyHosts_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "StreamNetworkPolicyHosts",
+ Handler: _NetworkPolicyHostsDiscoveryService_StreamNetworkPolicyHosts_Handler,
+ ServerStreams: true,
+ ClientStreams: true,
+ },
+ },
+ Metadata: "cilium/api/nphds.proto",
+}
diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/nphds.pb.validate.go b/vendor/github.com/cilium/proxy/go/cilium/api/nphds.pb.validate.go
new file mode 100644
index 000000000..6ea201221
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/cilium/api/nphds.pb.validate.go
@@ -0,0 +1,171 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: cilium/api/nphds.proto
+
+package cilium
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on NetworkPolicyHosts with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *NetworkPolicyHosts) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on NetworkPolicyHosts with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// NetworkPolicyHostsMultiError, or nil if none found.
+func (m *NetworkPolicyHosts) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *NetworkPolicyHosts) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Policy
+
+ _NetworkPolicyHosts_HostAddresses_Unique := make(map[string]struct{}, len(m.GetHostAddresses()))
+
+ for idx, item := range m.GetHostAddresses() {
+ _, _ = idx, item
+
+ if _, exists := _NetworkPolicyHosts_HostAddresses_Unique[item]; exists {
+ err := NetworkPolicyHostsValidationError{
+ field: fmt.Sprintf("HostAddresses[%v]", idx),
+ reason: "repeated value must contain unique items",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ } else {
+ _NetworkPolicyHosts_HostAddresses_Unique[item] = struct{}{}
+ }
+
+ if utf8.RuneCountInString(item) < 1 {
+ err := NetworkPolicyHostsValidationError{
+ field: fmt.Sprintf("HostAddresses[%v]", idx),
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return NetworkPolicyHostsMultiError(errors)
+ }
+
+ return nil
+}
+
+// NetworkPolicyHostsMultiError is an error wrapping multiple validation errors
+// returned by NetworkPolicyHosts.ValidateAll() if the designated constraints
+// aren't met.
+type NetworkPolicyHostsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m NetworkPolicyHostsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m NetworkPolicyHostsMultiError) AllErrors() []error { return m }
+
+// NetworkPolicyHostsValidationError is the validation error returned by
+// NetworkPolicyHosts.Validate if the designated constraints aren't met.
+type NetworkPolicyHostsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e NetworkPolicyHostsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e NetworkPolicyHostsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e NetworkPolicyHostsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e NetworkPolicyHostsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e NetworkPolicyHostsValidationError) ErrorName() string {
+ return "NetworkPolicyHostsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e NetworkPolicyHostsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sNetworkPolicyHosts.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = NetworkPolicyHostsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = NetworkPolicyHostsValidationError{}
diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/tls_wrapper.pb.go b/vendor/github.com/cilium/proxy/go/cilium/api/tls_wrapper.pb.go
new file mode 100644
index 000000000..e46dbabcd
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/cilium/api/tls_wrapper.pb.go
@@ -0,0 +1,194 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.23.0
+// protoc v4.23.1
+// source: cilium/api/tls_wrapper.proto
+
+package cilium
+
+import (
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// Empty configuration messages for Cilium TLS wrapper to make Envoy happy
+type UpstreamTlsWrapperContext struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *UpstreamTlsWrapperContext) Reset() {
+ *x = UpstreamTlsWrapperContext{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cilium_api_tls_wrapper_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UpstreamTlsWrapperContext) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpstreamTlsWrapperContext) ProtoMessage() {}
+
+func (x *UpstreamTlsWrapperContext) ProtoReflect() protoreflect.Message {
+ mi := &file_cilium_api_tls_wrapper_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpstreamTlsWrapperContext.ProtoReflect.Descriptor instead.
+func (*UpstreamTlsWrapperContext) Descriptor() ([]byte, []int) {
+ return file_cilium_api_tls_wrapper_proto_rawDescGZIP(), []int{0}
+}
+
+type DownstreamTlsWrapperContext struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *DownstreamTlsWrapperContext) Reset() {
+ *x = DownstreamTlsWrapperContext{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cilium_api_tls_wrapper_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DownstreamTlsWrapperContext) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DownstreamTlsWrapperContext) ProtoMessage() {}
+
+func (x *DownstreamTlsWrapperContext) ProtoReflect() protoreflect.Message {
+ mi := &file_cilium_api_tls_wrapper_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DownstreamTlsWrapperContext.ProtoReflect.Descriptor instead.
+func (*DownstreamTlsWrapperContext) Descriptor() ([]byte, []int) {
+ return file_cilium_api_tls_wrapper_proto_rawDescGZIP(), []int{1}
+}
+
+var File_cilium_api_tls_wrapper_proto protoreflect.FileDescriptor
+
+var file_cilium_api_tls_wrapper_proto_rawDesc = []byte{
+ 0x0a, 0x1c, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x6c, 0x73,
+ 0x5f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06,
+ 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x22, 0x1b, 0x0a, 0x19, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65,
+ 0x61, 0x6d, 0x54, 0x6c, 0x73, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x74,
+ 0x65, 0x78, 0x74, 0x22, 0x1d, 0x0a, 0x1b, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61,
+ 0x6d, 0x54, 0x6c, 0x73, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x74, 0x65,
+ 0x78, 0x74, 0x42, 0x2e, 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f,
+ 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x3b, 0x63, 0x69, 0x6c, 0x69,
+ 0x75, 0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_cilium_api_tls_wrapper_proto_rawDescOnce sync.Once
+ file_cilium_api_tls_wrapper_proto_rawDescData = file_cilium_api_tls_wrapper_proto_rawDesc
+)
+
+func file_cilium_api_tls_wrapper_proto_rawDescGZIP() []byte {
+ file_cilium_api_tls_wrapper_proto_rawDescOnce.Do(func() {
+ file_cilium_api_tls_wrapper_proto_rawDescData = protoimpl.X.CompressGZIP(file_cilium_api_tls_wrapper_proto_rawDescData)
+ })
+ return file_cilium_api_tls_wrapper_proto_rawDescData
+}
+
+var file_cilium_api_tls_wrapper_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_cilium_api_tls_wrapper_proto_goTypes = []interface{}{
+ (*UpstreamTlsWrapperContext)(nil), // 0: cilium.UpstreamTlsWrapperContext
+ (*DownstreamTlsWrapperContext)(nil), // 1: cilium.DownstreamTlsWrapperContext
+}
+var file_cilium_api_tls_wrapper_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_cilium_api_tls_wrapper_proto_init() }
+func file_cilium_api_tls_wrapper_proto_init() {
+ if File_cilium_api_tls_wrapper_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_cilium_api_tls_wrapper_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UpstreamTlsWrapperContext); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cilium_api_tls_wrapper_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DownstreamTlsWrapperContext); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cilium_api_tls_wrapper_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cilium_api_tls_wrapper_proto_goTypes,
+ DependencyIndexes: file_cilium_api_tls_wrapper_proto_depIdxs,
+ MessageInfos: file_cilium_api_tls_wrapper_proto_msgTypes,
+ }.Build()
+ File_cilium_api_tls_wrapper_proto = out.File
+ file_cilium_api_tls_wrapper_proto_rawDesc = nil
+ file_cilium_api_tls_wrapper_proto_goTypes = nil
+ file_cilium_api_tls_wrapper_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/tls_wrapper.pb.validate.go b/vendor/github.com/cilium/proxy/go/cilium/api/tls_wrapper.pb.validate.go
new file mode 100644
index 000000000..b5da26f8b
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/cilium/api/tls_wrapper.pb.validate.go
@@ -0,0 +1,241 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: cilium/api/tls_wrapper.proto
+
+package cilium
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on UpstreamTlsWrapperContext with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *UpstreamTlsWrapperContext) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on UpstreamTlsWrapperContext with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// UpstreamTlsWrapperContextMultiError, or nil if none found.
+func (m *UpstreamTlsWrapperContext) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *UpstreamTlsWrapperContext) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(errors) > 0 {
+ return UpstreamTlsWrapperContextMultiError(errors)
+ }
+
+ return nil
+}
+
+// UpstreamTlsWrapperContextMultiError is an error wrapping multiple validation
+// errors returned by UpstreamTlsWrapperContext.ValidateAll() if the
+// designated constraints aren't met.
+type UpstreamTlsWrapperContextMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m UpstreamTlsWrapperContextMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m UpstreamTlsWrapperContextMultiError) AllErrors() []error { return m }
+
+// UpstreamTlsWrapperContextValidationError is the validation error returned by
+// UpstreamTlsWrapperContext.Validate if the designated constraints aren't met.
+type UpstreamTlsWrapperContextValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e UpstreamTlsWrapperContextValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e UpstreamTlsWrapperContextValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e UpstreamTlsWrapperContextValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e UpstreamTlsWrapperContextValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e UpstreamTlsWrapperContextValidationError) ErrorName() string {
+ return "UpstreamTlsWrapperContextValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e UpstreamTlsWrapperContextValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sUpstreamTlsWrapperContext.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = UpstreamTlsWrapperContextValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = UpstreamTlsWrapperContextValidationError{}
+
+// Validate checks the field values on DownstreamTlsWrapperContext with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *DownstreamTlsWrapperContext) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on DownstreamTlsWrapperContext with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// DownstreamTlsWrapperContextMultiError, or nil if none found.
+func (m *DownstreamTlsWrapperContext) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *DownstreamTlsWrapperContext) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(errors) > 0 {
+ return DownstreamTlsWrapperContextMultiError(errors)
+ }
+
+ return nil
+}
+
+// DownstreamTlsWrapperContextMultiError is an error wrapping multiple
+// validation errors returned by DownstreamTlsWrapperContext.ValidateAll() if
+// the designated constraints aren't met.
+type DownstreamTlsWrapperContextMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m DownstreamTlsWrapperContextMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m DownstreamTlsWrapperContextMultiError) AllErrors() []error { return m }
+
+// DownstreamTlsWrapperContextValidationError is the validation error returned
+// by DownstreamTlsWrapperContext.Validate if the designated constraints
+// aren't met.
+type DownstreamTlsWrapperContextValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e DownstreamTlsWrapperContextValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e DownstreamTlsWrapperContextValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e DownstreamTlsWrapperContextValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e DownstreamTlsWrapperContextValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e DownstreamTlsWrapperContextValidationError) ErrorName() string {
+ return "DownstreamTlsWrapperContextValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e DownstreamTlsWrapperContextValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sDownstreamTlsWrapperContext.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = DownstreamTlsWrapperContextValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = DownstreamTlsWrapperContextValidationError{}
diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/websocket.pb.go b/vendor/github.com/cilium/proxy/go/cilium/api/websocket.pb.go
new file mode 100644
index 000000000..9ce27a2f8
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/cilium/api/websocket.pb.go
@@ -0,0 +1,411 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.23.0
+// protoc v4.23.1
+// source: cilium/api/websocket.proto
+
+package cilium
+
+import (
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+type WebSocketClient struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Path to the unix domain socket for the cilium access log, if any.
+ AccessLogPath string `protobuf:"bytes,1,opt,name=access_log_path,json=accessLogPath,proto3" json:"access_log_path,omitempty"`
+ // Host header value, required.
+ Host string `protobuf:"bytes,2,opt,name=host,proto3" json:"host,omitempty"`
+ // Path value. Defaults to "/".
+ Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"`
+ // sec-websocket-key value to use, defaults to a random key.
+ Key string `protobuf:"bytes,4,opt,name=key,proto3" json:"key,omitempty"`
+ // Websocket version, defaults to "13".
+ Version string `protobuf:"bytes,5,opt,name=version,proto3" json:"version,omitempty"`
+ // Origin header, if any.
+ Origin string `protobuf:"bytes,6,opt,name=origin,proto3" json:"origin,omitempty"`
+ // Websocket handshake timeout, default is 5 seconds.
+ HandshakeTimeout *durationpb.Duration `protobuf:"bytes,7,opt,name=handshake_timeout,json=handshakeTimeout,proto3" json:"handshake_timeout,omitempty"`
+ // ping interval, default is 0 (disabled).
+ // Connection is assumed dead if response is not received before the next ping is to be sent.
+ PingInterval *durationpb.Duration `protobuf:"bytes,8,opt,name=ping_interval,json=pingInterval,proto3" json:"ping_interval,omitempty"`
+ // ping only on when idle on both directions.
+ // ping_interval must be non-zero when this is true.
+ PingWhenIdle bool `protobuf:"varint,9,opt,name=ping_when_idle,json=pingWhenIdle,proto3" json:"ping_when_idle,omitempty"`
+}
+
+func (x *WebSocketClient) Reset() {
+ *x = WebSocketClient{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cilium_api_websocket_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *WebSocketClient) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*WebSocketClient) ProtoMessage() {}
+
+func (x *WebSocketClient) ProtoReflect() protoreflect.Message {
+ mi := &file_cilium_api_websocket_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use WebSocketClient.ProtoReflect.Descriptor instead.
+func (*WebSocketClient) Descriptor() ([]byte, []int) {
+ return file_cilium_api_websocket_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *WebSocketClient) GetAccessLogPath() string {
+ if x != nil {
+ return x.AccessLogPath
+ }
+ return ""
+}
+
+func (x *WebSocketClient) GetHost() string {
+ if x != nil {
+ return x.Host
+ }
+ return ""
+}
+
+func (x *WebSocketClient) GetPath() string {
+ if x != nil {
+ return x.Path
+ }
+ return ""
+}
+
+func (x *WebSocketClient) GetKey() string {
+ if x != nil {
+ return x.Key
+ }
+ return ""
+}
+
+func (x *WebSocketClient) GetVersion() string {
+ if x != nil {
+ return x.Version
+ }
+ return ""
+}
+
+func (x *WebSocketClient) GetOrigin() string {
+ if x != nil {
+ return x.Origin
+ }
+ return ""
+}
+
+func (x *WebSocketClient) GetHandshakeTimeout() *durationpb.Duration {
+ if x != nil {
+ return x.HandshakeTimeout
+ }
+ return nil
+}
+
+func (x *WebSocketClient) GetPingInterval() *durationpb.Duration {
+ if x != nil {
+ return x.PingInterval
+ }
+ return nil
+}
+
+func (x *WebSocketClient) GetPingWhenIdle() bool {
+ if x != nil {
+ return x.PingWhenIdle
+ }
+ return false
+}
+
+type WebSocketServer struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Path to the unix domain socket for the cilium access log, if any.
+ AccessLogPath string `protobuf:"bytes,1,opt,name=access_log_path,json=accessLogPath,proto3" json:"access_log_path,omitempty"`
+ // Expected host header value, if any.
+ Host string `protobuf:"bytes,2,opt,name=host,proto3" json:"host,omitempty"`
+ // Expected path value, if any.
+ Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"`
+ // sec-websocket-key value to expect, if any.
+ Key string `protobuf:"bytes,4,opt,name=key,proto3" json:"key,omitempty"`
+ // Websocket version, ignored if omitted.
+ Version string `protobuf:"bytes,5,opt,name=version,proto3" json:"version,omitempty"`
+ // Origin header, if any. Origin header is not allowed if omitted.
+ Origin string `protobuf:"bytes,6,opt,name=origin,proto3" json:"origin,omitempty"`
+ // Websocket handshake timeout, default is 5 seconds.
+ HandshakeTimeout *durationpb.Duration `protobuf:"bytes,7,opt,name=handshake_timeout,json=handshakeTimeout,proto3" json:"handshake_timeout,omitempty"`
+ // ping interval, default is 0 (disabled).
+ // Connection is assumed dead if response is not received before the next ping is to be sent.
+ PingInterval *durationpb.Duration `protobuf:"bytes,8,opt,name=ping_interval,json=pingInterval,proto3" json:"ping_interval,omitempty"`
+ // ping only on when idle on both directions.
+ // ping_interval must be non-zero when this is true.
+ PingWhenIdle bool `protobuf:"varint,9,opt,name=ping_when_idle,json=pingWhenIdle,proto3" json:"ping_when_idle,omitempty"`
+}
+
+func (x *WebSocketServer) Reset() {
+ *x = WebSocketServer{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cilium_api_websocket_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *WebSocketServer) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*WebSocketServer) ProtoMessage() {}
+
+func (x *WebSocketServer) ProtoReflect() protoreflect.Message {
+ mi := &file_cilium_api_websocket_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use WebSocketServer.ProtoReflect.Descriptor instead.
+func (*WebSocketServer) Descriptor() ([]byte, []int) {
+ return file_cilium_api_websocket_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *WebSocketServer) GetAccessLogPath() string {
+ if x != nil {
+ return x.AccessLogPath
+ }
+ return ""
+}
+
+func (x *WebSocketServer) GetHost() string {
+ if x != nil {
+ return x.Host
+ }
+ return ""
+}
+
+func (x *WebSocketServer) GetPath() string {
+ if x != nil {
+ return x.Path
+ }
+ return ""
+}
+
+func (x *WebSocketServer) GetKey() string {
+ if x != nil {
+ return x.Key
+ }
+ return ""
+}
+
+func (x *WebSocketServer) GetVersion() string {
+ if x != nil {
+ return x.Version
+ }
+ return ""
+}
+
+func (x *WebSocketServer) GetOrigin() string {
+ if x != nil {
+ return x.Origin
+ }
+ return ""
+}
+
+func (x *WebSocketServer) GetHandshakeTimeout() *durationpb.Duration {
+ if x != nil {
+ return x.HandshakeTimeout
+ }
+ return nil
+}
+
+func (x *WebSocketServer) GetPingInterval() *durationpb.Duration {
+ if x != nil {
+ return x.PingInterval
+ }
+ return nil
+}
+
+func (x *WebSocketServer) GetPingWhenIdle() bool {
+ if x != nil {
+ return x.PingWhenIdle
+ }
+ return false
+}
+
+var File_cilium_api_websocket_proto protoreflect.FileDescriptor
+
+var file_cilium_api_websocket_proto_rawDesc = []byte{
+ 0x0a, 0x1a, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x77, 0x65, 0x62,
+ 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x63, 0x69,
+ 0x6c, 0x69, 0x75, 0x6d, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76,
+ 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xdc, 0x02,
+ 0x0a, 0x0f, 0x57, 0x65, 0x62, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e,
+ 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f,
+ 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x61, 0x63, 0x63, 0x65,
+ 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1b, 0x0a, 0x04, 0x68, 0x6f, 0x73,
+ 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x02,
+ 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
+ 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07,
+ 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e,
+ 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, 0x46,
+ 0x0a, 0x11, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65,
+ 0x6f, 0x75, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x54,
+ 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3e, 0x0a, 0x0d, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x69,
+ 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x49, 0x6e,
+ 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x77,
+ 0x68, 0x65, 0x6e, 0x5f, 0x69, 0x64, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c,
+ 0x70, 0x69, 0x6e, 0x67, 0x57, 0x68, 0x65, 0x6e, 0x49, 0x64, 0x6c, 0x65, 0x22, 0xd3, 0x02, 0x0a,
+ 0x0f, 0x57, 0x65, 0x62, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72,
+ 0x12, 0x26, 0x0a, 0x0f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x70,
+ 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x73,
+ 0x73, 0x4c, 0x6f, 0x67, 0x50, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04,
+ 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68,
+ 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b,
+ 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06,
+ 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x72,
+ 0x69, 0x67, 0x69, 0x6e, 0x12, 0x46, 0x0a, 0x11, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b,
+ 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x68, 0x61, 0x6e, 0x64,
+ 0x73, 0x68, 0x61, 0x6b, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3e, 0x0a, 0x0d,
+ 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x08, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c,
+ 0x70, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x24, 0x0a, 0x0e,
+ 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x77, 0x68, 0x65, 0x6e, 0x5f, 0x69, 0x64, 0x6c, 0x65, 0x18, 0x09,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x70, 0x69, 0x6e, 0x67, 0x57, 0x68, 0x65, 0x6e, 0x49, 0x64,
+ 0x6c, 0x65, 0x42, 0x2e, 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f,
+ 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x3b, 0x63, 0x69, 0x6c, 0x69,
+ 0x75, 0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_cilium_api_websocket_proto_rawDescOnce sync.Once
+ file_cilium_api_websocket_proto_rawDescData = file_cilium_api_websocket_proto_rawDesc
+)
+
+func file_cilium_api_websocket_proto_rawDescGZIP() []byte {
+ file_cilium_api_websocket_proto_rawDescOnce.Do(func() {
+ file_cilium_api_websocket_proto_rawDescData = protoimpl.X.CompressGZIP(file_cilium_api_websocket_proto_rawDescData)
+ })
+ return file_cilium_api_websocket_proto_rawDescData
+}
+
+var file_cilium_api_websocket_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_cilium_api_websocket_proto_goTypes = []interface{}{
+ (*WebSocketClient)(nil), // 0: cilium.WebSocketClient
+ (*WebSocketServer)(nil), // 1: cilium.WebSocketServer
+ (*durationpb.Duration)(nil), // 2: google.protobuf.Duration
+}
+var file_cilium_api_websocket_proto_depIdxs = []int32{
+ 2, // 0: cilium.WebSocketClient.handshake_timeout:type_name -> google.protobuf.Duration
+ 2, // 1: cilium.WebSocketClient.ping_interval:type_name -> google.protobuf.Duration
+ 2, // 2: cilium.WebSocketServer.handshake_timeout:type_name -> google.protobuf.Duration
+ 2, // 3: cilium.WebSocketServer.ping_interval:type_name -> google.protobuf.Duration
+ 4, // [4:4] is the sub-list for method output_type
+ 4, // [4:4] is the sub-list for method input_type
+ 4, // [4:4] is the sub-list for extension type_name
+ 4, // [4:4] is the sub-list for extension extendee
+ 0, // [0:4] is the sub-list for field type_name
+}
+
+func init() { file_cilium_api_websocket_proto_init() }
+func file_cilium_api_websocket_proto_init() {
+ if File_cilium_api_websocket_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_cilium_api_websocket_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*WebSocketClient); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cilium_api_websocket_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*WebSocketServer); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cilium_api_websocket_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cilium_api_websocket_proto_goTypes,
+ DependencyIndexes: file_cilium_api_websocket_proto_depIdxs,
+ MessageInfos: file_cilium_api_websocket_proto_msgTypes,
+ }.Build()
+ File_cilium_api_websocket_proto = out.File
+ file_cilium_api_websocket_proto_rawDesc = nil
+ file_cilium_api_websocket_proto_goTypes = nil
+ file_cilium_api_websocket_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cilium/proxy/go/cilium/api/websocket.pb.validate.go b/vendor/github.com/cilium/proxy/go/cilium/api/websocket.pb.validate.go
new file mode 100644
index 000000000..9f7a8b25c
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/cilium/api/websocket.pb.validate.go
@@ -0,0 +1,389 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: cilium/api/websocket.proto
+
+package cilium
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on WebSocketClient with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *WebSocketClient) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on WebSocketClient with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// WebSocketClientMultiError, or nil if none found.
+func (m *WebSocketClient) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *WebSocketClient) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for AccessLogPath
+
+ if utf8.RuneCountInString(m.GetHost()) < 2 {
+ err := WebSocketClientValidationError{
+ field: "Host",
+ reason: "value length must be at least 2 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ // no validation rules for Path
+
+ // no validation rules for Key
+
+ // no validation rules for Version
+
+ // no validation rules for Origin
+
+ if all {
+ switch v := interface{}(m.GetHandshakeTimeout()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, WebSocketClientValidationError{
+ field: "HandshakeTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, WebSocketClientValidationError{
+ field: "HandshakeTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetHandshakeTimeout()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return WebSocketClientValidationError{
+ field: "HandshakeTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetPingInterval()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, WebSocketClientValidationError{
+ field: "PingInterval",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, WebSocketClientValidationError{
+ field: "PingInterval",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetPingInterval()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return WebSocketClientValidationError{
+ field: "PingInterval",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for PingWhenIdle
+
+ if len(errors) > 0 {
+ return WebSocketClientMultiError(errors)
+ }
+
+ return nil
+}
+
+// WebSocketClientMultiError is an error wrapping multiple validation errors
+// returned by WebSocketClient.ValidateAll() if the designated constraints
+// aren't met.
+type WebSocketClientMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m WebSocketClientMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m WebSocketClientMultiError) AllErrors() []error { return m }
+
+// WebSocketClientValidationError is the validation error returned by
+// WebSocketClient.Validate if the designated constraints aren't met.
+type WebSocketClientValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e WebSocketClientValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e WebSocketClientValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e WebSocketClientValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e WebSocketClientValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e WebSocketClientValidationError) ErrorName() string { return "WebSocketClientValidationError" }
+
+// Error satisfies the builtin error interface
+func (e WebSocketClientValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sWebSocketClient.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = WebSocketClientValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = WebSocketClientValidationError{}
+
+// Validate checks the field values on WebSocketServer with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *WebSocketServer) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on WebSocketServer with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// WebSocketServerMultiError, or nil if none found.
+func (m *WebSocketServer) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *WebSocketServer) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for AccessLogPath
+
+ // no validation rules for Host
+
+ // no validation rules for Path
+
+ // no validation rules for Key
+
+ // no validation rules for Version
+
+ // no validation rules for Origin
+
+ if all {
+ switch v := interface{}(m.GetHandshakeTimeout()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, WebSocketServerValidationError{
+ field: "HandshakeTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, WebSocketServerValidationError{
+ field: "HandshakeTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetHandshakeTimeout()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return WebSocketServerValidationError{
+ field: "HandshakeTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetPingInterval()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, WebSocketServerValidationError{
+ field: "PingInterval",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, WebSocketServerValidationError{
+ field: "PingInterval",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetPingInterval()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return WebSocketServerValidationError{
+ field: "PingInterval",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for PingWhenIdle
+
+ if len(errors) > 0 {
+ return WebSocketServerMultiError(errors)
+ }
+
+ return nil
+}
+
+// WebSocketServerMultiError is an error wrapping multiple validation errors
+// returned by WebSocketServer.ValidateAll() if the designated constraints
+// aren't met.
+type WebSocketServerMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m WebSocketServerMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m WebSocketServerMultiError) AllErrors() []error { return m }
+
+// WebSocketServerValidationError is the validation error returned by
+// WebSocketServer.Validate if the designated constraints aren't met.
+type WebSocketServerValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e WebSocketServerValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e WebSocketServerValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e WebSocketServerValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e WebSocketServerValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e WebSocketServerValidationError) ErrorName() string { return "WebSocketServerValidationError" }
+
+// Error satisfies the builtin error interface
+func (e WebSocketServerValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sWebSocketServer.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = WebSocketServerValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = WebSocketServerValidationError{}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/annotations/deprecation.pb.go b/vendor/github.com/cilium/proxy/go/envoy/annotations/deprecation.pb.go
new file mode 100644
index 000000000..b18b44ba6
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/annotations/deprecation.pb.go
@@ -0,0 +1,164 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.23.0
+// protoc v4.23.1
+// source: envoy/annotations/deprecation.proto
+
+package annotations
+
+import (
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+var file_envoy_annotations_deprecation_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.FieldOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 189503207,
+ Name: "envoy.annotations.disallowed_by_default",
+ Tag: "varint,189503207,opt,name=disallowed_by_default",
+ Filename: "envoy/annotations/deprecation.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.FieldOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 157299826,
+ Name: "envoy.annotations.deprecated_at_minor_version",
+ Tag: "bytes,157299826,opt,name=deprecated_at_minor_version",
+ Filename: "envoy/annotations/deprecation.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.EnumValueOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 70100853,
+ Name: "envoy.annotations.disallowed_by_default_enum",
+ Tag: "varint,70100853,opt,name=disallowed_by_default_enum",
+ Filename: "envoy/annotations/deprecation.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.EnumValueOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 181198657,
+ Name: "envoy.annotations.deprecated_at_minor_version_enum",
+ Tag: "bytes,181198657,opt,name=deprecated_at_minor_version_enum",
+ Filename: "envoy/annotations/deprecation.proto",
+ },
+}
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+ // optional bool disallowed_by_default = 189503207;
+ E_DisallowedByDefault = &file_envoy_annotations_deprecation_proto_extTypes[0]
+ // The API major and minor version on which the field was deprecated
+ // (e.g., "3.5" for major version 3 and minor version 5).
+ //
+ // optional string deprecated_at_minor_version = 157299826;
+ E_DeprecatedAtMinorVersion = &file_envoy_annotations_deprecation_proto_extTypes[1]
+)
+
+// Extension fields to descriptorpb.EnumValueOptions.
+var (
+ // optional bool disallowed_by_default_enum = 70100853;
+ E_DisallowedByDefaultEnum = &file_envoy_annotations_deprecation_proto_extTypes[2]
+ // The API major and minor version on which the enum value was deprecated
+ // (e.g., "3.5" for major version 3 and minor version 5).
+ //
+ // optional string deprecated_at_minor_version_enum = 181198657;
+ E_DeprecatedAtMinorVersionEnum = &file_envoy_annotations_deprecation_proto_extTypes[3]
+)
+
+var File_envoy_annotations_deprecation_proto protoreflect.FileDescriptor
+
+var file_envoy_annotations_deprecation_proto_rawDesc = []byte{
+ 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x54, 0x0a, 0x15, 0x64, 0x69,
+ 0x73, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x64, 0x65, 0x66, 0x61,
+ 0x75, 0x6c, 0x74, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0xe7, 0xad, 0xae, 0x5a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x64, 0x69, 0x73,
+ 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x42, 0x79, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
+ 0x3a, 0x5f, 0x0a, 0x1b, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61,
+ 0x74, 0x5f, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12,
+ 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf2,
+ 0xe8, 0x80, 0x4b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
+ 0x74, 0x65, 0x64, 0x41, 0x74, 0x4d, 0x69, 0x6e, 0x6f, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x3a, 0x61, 0x0a, 0x1a, 0x64, 0x69, 0x73, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f,
+ 0x62, 0x79, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x12,
+ 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0xf5, 0xce, 0xb6, 0x21, 0x20, 0x01, 0x28, 0x08, 0x52, 0x17, 0x64, 0x69, 0x73,
+ 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x42, 0x79, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
+ 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x6c, 0x0a, 0x20, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74,
+ 0x65, 0x64, 0x5f, 0x61, 0x74, 0x5f, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xbe, 0xb3, 0x56,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x1c, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
+ 0x41, 0x74, 0x4d, 0x69, 0x6e, 0x6f, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x45, 0x6e,
+ 0x75, 0x6d, 0x42, 0x3a, 0x5a, 0x38, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var file_envoy_annotations_deprecation_proto_goTypes = []interface{}{
+ (*descriptorpb.FieldOptions)(nil), // 0: google.protobuf.FieldOptions
+ (*descriptorpb.EnumValueOptions)(nil), // 1: google.protobuf.EnumValueOptions
+}
+var file_envoy_annotations_deprecation_proto_depIdxs = []int32{
+ 0, // 0: envoy.annotations.disallowed_by_default:extendee -> google.protobuf.FieldOptions
+ 0, // 1: envoy.annotations.deprecated_at_minor_version:extendee -> google.protobuf.FieldOptions
+ 1, // 2: envoy.annotations.disallowed_by_default_enum:extendee -> google.protobuf.EnumValueOptions
+ 1, // 3: envoy.annotations.deprecated_at_minor_version_enum:extendee -> google.protobuf.EnumValueOptions
+ 4, // [4:4] is the sub-list for method output_type
+ 4, // [4:4] is the sub-list for method input_type
+ 4, // [4:4] is the sub-list for extension type_name
+ 0, // [0:4] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_envoy_annotations_deprecation_proto_init() }
+func file_envoy_annotations_deprecation_proto_init() {
+ if File_envoy_annotations_deprecation_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_annotations_deprecation_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 4,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_annotations_deprecation_proto_goTypes,
+ DependencyIndexes: file_envoy_annotations_deprecation_proto_depIdxs,
+ ExtensionInfos: file_envoy_annotations_deprecation_proto_extTypes,
+ }.Build()
+ File_envoy_annotations_deprecation_proto = out.File
+ file_envoy_annotations_deprecation_proto_rawDesc = nil
+ file_envoy_annotations_deprecation_proto_goTypes = nil
+ file_envoy_annotations_deprecation_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/annotations/deprecation.pb.validate.go b/vendor/github.com/cilium/proxy/go/envoy/annotations/deprecation.pb.validate.go
new file mode 100644
index 000000000..2452fcdd6
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/annotations/deprecation.pb.validate.go
@@ -0,0 +1,36 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/annotations/deprecation.proto
+
+package annotations
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
diff --git a/vendor/github.com/cilium/proxy/go/envoy/annotations/resource.pb.go b/vendor/github.com/cilium/proxy/go/envoy/annotations/resource.pb.go
new file mode 100644
index 000000000..5b42ed96a
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/annotations/resource.pb.go
@@ -0,0 +1,184 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.23.0
+// protoc v4.23.1
+// source: envoy/annotations/resource.proto
+
+package annotations
+
+import (
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+type ResourceAnnotation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Annotation for xDS services that indicates the fully-qualified Protobuf type for the resource
+ // type.
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+}
+
+func (x *ResourceAnnotation) Reset() {
+ *x = ResourceAnnotation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_annotations_resource_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ResourceAnnotation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResourceAnnotation) ProtoMessage() {}
+
+func (x *ResourceAnnotation) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_annotations_resource_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResourceAnnotation.ProtoReflect.Descriptor instead.
+func (*ResourceAnnotation) Descriptor() ([]byte, []int) {
+ return file_envoy_annotations_resource_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ResourceAnnotation) GetType() string {
+ if x != nil {
+ return x.Type
+ }
+ return ""
+}
+
+var file_envoy_annotations_resource_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.ServiceOptions)(nil),
+ ExtensionType: (*ResourceAnnotation)(nil),
+ Field: 265073217,
+ Name: "envoy.annotations.resource",
+ Tag: "bytes,265073217,opt,name=resource",
+ Filename: "envoy/annotations/resource.proto",
+ },
+}
+
+// Extension fields to descriptorpb.ServiceOptions.
+var (
+ // optional envoy.annotations.ResourceAnnotation resource = 265073217;
+ E_Resource = &file_envoy_annotations_resource_proto_extTypes[0]
+)
+
+var File_envoy_annotations_resource_proto protoreflect.FileDescriptor
+
+var file_envoy_annotations_resource_proto_rawDesc = []byte{
+ 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x12, 0x11, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
+ 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x28, 0x0a, 0x12, 0x52, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70,
+ 0x65, 0x3a, 0x65, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1f, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1,
+ 0xe4, 0xb2, 0x7e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08,
+ 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x3a, 0x5a, 0x38, 0x67, 0x69, 0x74, 0x68,
+ 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78,
+ 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61,
+ 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_annotations_resource_proto_rawDescOnce sync.Once
+ file_envoy_annotations_resource_proto_rawDescData = file_envoy_annotations_resource_proto_rawDesc
+)
+
+func file_envoy_annotations_resource_proto_rawDescGZIP() []byte {
+ file_envoy_annotations_resource_proto_rawDescOnce.Do(func() {
+ file_envoy_annotations_resource_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_annotations_resource_proto_rawDescData)
+ })
+ return file_envoy_annotations_resource_proto_rawDescData
+}
+
+var file_envoy_annotations_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_envoy_annotations_resource_proto_goTypes = []interface{}{
+ (*ResourceAnnotation)(nil), // 0: envoy.annotations.ResourceAnnotation
+ (*descriptorpb.ServiceOptions)(nil), // 1: google.protobuf.ServiceOptions
+}
+var file_envoy_annotations_resource_proto_depIdxs = []int32{
+ 1, // 0: envoy.annotations.resource:extendee -> google.protobuf.ServiceOptions
+ 0, // 1: envoy.annotations.resource:type_name -> envoy.annotations.ResourceAnnotation
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 1, // [1:2] is the sub-list for extension type_name
+ 0, // [0:1] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_envoy_annotations_resource_proto_init() }
+func file_envoy_annotations_resource_proto_init() {
+ if File_envoy_annotations_resource_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_annotations_resource_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ResourceAnnotation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_annotations_resource_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 1,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_annotations_resource_proto_goTypes,
+ DependencyIndexes: file_envoy_annotations_resource_proto_depIdxs,
+ MessageInfos: file_envoy_annotations_resource_proto_msgTypes,
+ ExtensionInfos: file_envoy_annotations_resource_proto_extTypes,
+ }.Build()
+ File_envoy_annotations_resource_proto = out.File
+ file_envoy_annotations_resource_proto_rawDesc = nil
+ file_envoy_annotations_resource_proto_goTypes = nil
+ file_envoy_annotations_resource_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/annotations/resource.pb.validate.go b/vendor/github.com/cilium/proxy/go/envoy/annotations/resource.pb.validate.go
new file mode 100644
index 000000000..05dc78968
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/annotations/resource.pb.validate.go
@@ -0,0 +1,140 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/annotations/resource.proto
+
+package annotations
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on ResourceAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *ResourceAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ResourceAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// ResourceAnnotationMultiError, or nil if none found.
+func (m *ResourceAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ResourceAnnotation) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Type
+
+ if len(errors) > 0 {
+ return ResourceAnnotationMultiError(errors)
+ }
+
+ return nil
+}
+
+// ResourceAnnotationMultiError is an error wrapping multiple validation errors
+// returned by ResourceAnnotation.ValidateAll() if the designated constraints
+// aren't met.
+type ResourceAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ResourceAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ResourceAnnotationMultiError) AllErrors() []error { return m }
+
+// ResourceAnnotationValidationError is the validation error returned by
+// ResourceAnnotation.Validate if the designated constraints aren't met.
+type ResourceAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ResourceAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ResourceAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ResourceAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ResourceAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ResourceAnnotationValidationError) ErrorName() string {
+ return "ResourceAnnotationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ResourceAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sResourceAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ResourceAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ResourceAnnotationValidationError{}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/address.pb.go b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/address.pb.go
new file mode 100644
index 000000000..5bbf74f97
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/address.pb.go
@@ -0,0 +1,1095 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.23.0
+// protoc v4.23.1
+// source: envoy/config/core/v3/address.proto
+
+package corev3
+
+import (
+ _ "github.com/cilium/proxy/go/envoy/annotations"
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+type SocketAddress_Protocol int32
+
+const (
+ SocketAddress_TCP SocketAddress_Protocol = 0
+ SocketAddress_UDP SocketAddress_Protocol = 1
+)
+
+// Enum value maps for SocketAddress_Protocol.
+var (
+ SocketAddress_Protocol_name = map[int32]string{
+ 0: "TCP",
+ 1: "UDP",
+ }
+ SocketAddress_Protocol_value = map[string]int32{
+ "TCP": 0,
+ "UDP": 1,
+ }
+)
+
+func (x SocketAddress_Protocol) Enum() *SocketAddress_Protocol {
+ p := new(SocketAddress_Protocol)
+ *p = x
+ return p
+}
+
+func (x SocketAddress_Protocol) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SocketAddress_Protocol) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_core_v3_address_proto_enumTypes[0].Descriptor()
+}
+
+func (SocketAddress_Protocol) Type() protoreflect.EnumType {
+ return &file_envoy_config_core_v3_address_proto_enumTypes[0]
+}
+
+func (x SocketAddress_Protocol) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use SocketAddress_Protocol.Descriptor instead.
+func (SocketAddress_Protocol) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{2, 0}
+}
+
+type Pipe struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Unix Domain Socket path. On Linux, paths starting with '@' will use the
+ // abstract namespace. The starting '@' is replaced by a null byte by Envoy.
+ // Paths starting with '@' will result in an error in environments other than
+ // Linux.
+ Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+ // The mode for the Pipe. Not applicable for abstract sockets.
+ Mode uint32 `protobuf:"varint,2,opt,name=mode,proto3" json:"mode,omitempty"`
+}
+
+func (x *Pipe) Reset() {
+ *x = Pipe{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_address_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Pipe) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Pipe) ProtoMessage() {}
+
+func (x *Pipe) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_address_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Pipe.ProtoReflect.Descriptor instead.
+func (*Pipe) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Pipe) GetPath() string {
+ if x != nil {
+ return x.Path
+ }
+ return ""
+}
+
+func (x *Pipe) GetMode() uint32 {
+ if x != nil {
+ return x.Mode
+ }
+ return 0
+}
+
+// The address represents an envoy internal listener.
+// [#comment: TODO(asraa): When address available, remove workaround from test/server/server_fuzz_test.cc:30.]
+type EnvoyInternalAddress struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to AddressNameSpecifier:
+ //
+ // *EnvoyInternalAddress_ServerListenerName
+ AddressNameSpecifier isEnvoyInternalAddress_AddressNameSpecifier `protobuf_oneof:"address_name_specifier"`
+ // Specifies an endpoint identifier to distinguish between multiple endpoints for the same internal listener in a
+ // single upstream pool. Only used in the upstream addresses for tracking changes to individual endpoints. This, for
+ // example, may be set to the final destination IP for the target internal listener.
+ EndpointId string `protobuf:"bytes,2,opt,name=endpoint_id,json=endpointId,proto3" json:"endpoint_id,omitempty"`
+}
+
+func (x *EnvoyInternalAddress) Reset() {
+ *x = EnvoyInternalAddress{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_address_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EnvoyInternalAddress) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EnvoyInternalAddress) ProtoMessage() {}
+
+func (x *EnvoyInternalAddress) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_address_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EnvoyInternalAddress.ProtoReflect.Descriptor instead.
+func (*EnvoyInternalAddress) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{1}
+}
+
+func (m *EnvoyInternalAddress) GetAddressNameSpecifier() isEnvoyInternalAddress_AddressNameSpecifier {
+ if m != nil {
+ return m.AddressNameSpecifier
+ }
+ return nil
+}
+
+func (x *EnvoyInternalAddress) GetServerListenerName() string {
+ if x, ok := x.GetAddressNameSpecifier().(*EnvoyInternalAddress_ServerListenerName); ok {
+ return x.ServerListenerName
+ }
+ return ""
+}
+
+func (x *EnvoyInternalAddress) GetEndpointId() string {
+ if x != nil {
+ return x.EndpointId
+ }
+ return ""
+}
+
+type isEnvoyInternalAddress_AddressNameSpecifier interface {
+ isEnvoyInternalAddress_AddressNameSpecifier()
+}
+
+type EnvoyInternalAddress_ServerListenerName struct {
+ // Specifies the :ref:`name ` of the
+ // internal listener.
+ ServerListenerName string `protobuf:"bytes,1,opt,name=server_listener_name,json=serverListenerName,proto3,oneof"`
+}
+
+func (*EnvoyInternalAddress_ServerListenerName) isEnvoyInternalAddress_AddressNameSpecifier() {}
+
+// [#next-free-field: 7]
+type SocketAddress struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Protocol SocketAddress_Protocol `protobuf:"varint,1,opt,name=protocol,proto3,enum=envoy.config.core.v3.SocketAddress_Protocol" json:"protocol,omitempty"`
+ // The address for this socket. :ref:`Listeners ` will bind
+ // to the address. An empty address is not allowed. Specify “0.0.0.0“ or “::“
+ // to bind to any address. [#comment:TODO(zuercher) reinstate when implemented:
+ // It is possible to distinguish a Listener address via the prefix/suffix matching
+ // in :ref:`FilterChainMatch `.] When used
+ // within an upstream :ref:`BindConfig `, the address
+ // controls the source address of outbound connections. For :ref:`clusters
+ // `, the cluster type determines whether the
+ // address must be an IP (“STATIC“ or “EDS“ clusters) or a hostname resolved by DNS
+ // (“STRICT_DNS“ or “LOGICAL_DNS“ clusters). Address resolution can be customized
+ // via :ref:`resolver_name `.
+ Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
+ // Types that are assignable to PortSpecifier:
+ //
+ // *SocketAddress_PortValue
+ // *SocketAddress_NamedPort
+ PortSpecifier isSocketAddress_PortSpecifier `protobuf_oneof:"port_specifier"`
+ // The name of the custom resolver. This must have been registered with Envoy. If
+ // this is empty, a context dependent default applies. If the address is a concrete
+ // IP address, no resolution will occur. If address is a hostname this
+ // should be set for resolution other than DNS. Specifying a custom resolver with
+ // “STRICT_DNS“ or “LOGICAL_DNS“ will generate an error at runtime.
+ ResolverName string `protobuf:"bytes,5,opt,name=resolver_name,json=resolverName,proto3" json:"resolver_name,omitempty"`
+ // When binding to an IPv6 address above, this enables `IPv4 compatibility
+ // `_. Binding to “::“ will
+ // allow both IPv4 and IPv6 connections, with peer IPv4 addresses mapped into
+ // IPv6 space as “::FFFF:“.
+ Ipv4Compat bool `protobuf:"varint,6,opt,name=ipv4_compat,json=ipv4Compat,proto3" json:"ipv4_compat,omitempty"`
+}
+
+func (x *SocketAddress) Reset() {
+ *x = SocketAddress{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_address_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SocketAddress) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SocketAddress) ProtoMessage() {}
+
+func (x *SocketAddress) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_address_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SocketAddress.ProtoReflect.Descriptor instead.
+func (*SocketAddress) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *SocketAddress) GetProtocol() SocketAddress_Protocol {
+ if x != nil {
+ return x.Protocol
+ }
+ return SocketAddress_TCP
+}
+
+func (x *SocketAddress) GetAddress() string {
+ if x != nil {
+ return x.Address
+ }
+ return ""
+}
+
+func (m *SocketAddress) GetPortSpecifier() isSocketAddress_PortSpecifier {
+ if m != nil {
+ return m.PortSpecifier
+ }
+ return nil
+}
+
+func (x *SocketAddress) GetPortValue() uint32 {
+ if x, ok := x.GetPortSpecifier().(*SocketAddress_PortValue); ok {
+ return x.PortValue
+ }
+ return 0
+}
+
+func (x *SocketAddress) GetNamedPort() string {
+ if x, ok := x.GetPortSpecifier().(*SocketAddress_NamedPort); ok {
+ return x.NamedPort
+ }
+ return ""
+}
+
+func (x *SocketAddress) GetResolverName() string {
+ if x != nil {
+ return x.ResolverName
+ }
+ return ""
+}
+
+func (x *SocketAddress) GetIpv4Compat() bool {
+ if x != nil {
+ return x.Ipv4Compat
+ }
+ return false
+}
+
+type isSocketAddress_PortSpecifier interface {
+ isSocketAddress_PortSpecifier()
+}
+
+type SocketAddress_PortValue struct {
+ PortValue uint32 `protobuf:"varint,3,opt,name=port_value,json=portValue,proto3,oneof"`
+}
+
+type SocketAddress_NamedPort struct {
+ // This is only valid if :ref:`resolver_name
+ // ` is specified below and the
+ // named resolver is capable of named port resolution.
+ NamedPort string `protobuf:"bytes,4,opt,name=named_port,json=namedPort,proto3,oneof"`
+}
+
+func (*SocketAddress_PortValue) isSocketAddress_PortSpecifier() {}
+
+func (*SocketAddress_NamedPort) isSocketAddress_PortSpecifier() {}
+
+type TcpKeepalive struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Maximum number of keepalive probes to send without response before deciding
+ // the connection is dead. Default is to use the OS level configuration (unless
+ // overridden, Linux defaults to 9.)
+ KeepaliveProbes *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=keepalive_probes,json=keepaliveProbes,proto3" json:"keepalive_probes,omitempty"`
+ // The number of seconds a connection needs to be idle before keep-alive probes
+ // start being sent. Default is to use the OS level configuration (unless
+ // overridden, Linux defaults to 7200s (i.e., 2 hours.)
+ KeepaliveTime *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=keepalive_time,json=keepaliveTime,proto3" json:"keepalive_time,omitempty"`
+ // The number of seconds between keep-alive probes. Default is to use the OS
+ // level configuration (unless overridden, Linux defaults to 75s.)
+ KeepaliveInterval *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=keepalive_interval,json=keepaliveInterval,proto3" json:"keepalive_interval,omitempty"`
+}
+
+func (x *TcpKeepalive) Reset() {
+ *x = TcpKeepalive{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_address_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TcpKeepalive) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TcpKeepalive) ProtoMessage() {}
+
+func (x *TcpKeepalive) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_address_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TcpKeepalive.ProtoReflect.Descriptor instead.
+func (*TcpKeepalive) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *TcpKeepalive) GetKeepaliveProbes() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.KeepaliveProbes
+ }
+ return nil
+}
+
+func (x *TcpKeepalive) GetKeepaliveTime() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.KeepaliveTime
+ }
+ return nil
+}
+
+func (x *TcpKeepalive) GetKeepaliveInterval() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.KeepaliveInterval
+ }
+ return nil
+}
+
+type ExtraSourceAddress struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The additional address to bind.
+ Address *SocketAddress `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
+ // Additional socket options that may not be present in Envoy source code or
+ // precompiled binaries. If specified, this will override the
+ // :ref:`socket_options `
+ // in the BindConfig. If specified with no
+ // :ref:`socket_options `
+ // or an empty list of :ref:`socket_options `,
+ // it means no socket option will apply.
+ SocketOptions *SocketOptionsOverride `protobuf:"bytes,2,opt,name=socket_options,json=socketOptions,proto3" json:"socket_options,omitempty"`
+}
+
+func (x *ExtraSourceAddress) Reset() {
+ *x = ExtraSourceAddress{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_address_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ExtraSourceAddress) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExtraSourceAddress) ProtoMessage() {}
+
+func (x *ExtraSourceAddress) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_address_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExtraSourceAddress.ProtoReflect.Descriptor instead.
+func (*ExtraSourceAddress) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *ExtraSourceAddress) GetAddress() *SocketAddress {
+ if x != nil {
+ return x.Address
+ }
+ return nil
+}
+
+func (x *ExtraSourceAddress) GetSocketOptions() *SocketOptionsOverride {
+ if x != nil {
+ return x.SocketOptions
+ }
+ return nil
+}
+
+// [#next-free-field: 6]
+type BindConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The address to bind to when creating a socket.
+ SourceAddress *SocketAddress `protobuf:"bytes,1,opt,name=source_address,json=sourceAddress,proto3" json:"source_address,omitempty"`
+ // Whether to set the “IP_FREEBIND“ option when creating the socket. When this
+ // flag is set to true, allows the :ref:`source_address
+ // ` to be an IP address
+ // that is not configured on the system running Envoy. When this flag is set
+ // to false, the option “IP_FREEBIND“ is disabled on the socket. When this
+ // flag is not set (default), the socket is not modified, i.e. the option is
+ // neither enabled nor disabled.
+ Freebind *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=freebind,proto3" json:"freebind,omitempty"`
+ // Additional socket options that may not be present in Envoy source code or
+ // precompiled binaries.
+ SocketOptions []*SocketOption `protobuf:"bytes,3,rep,name=socket_options,json=socketOptions,proto3" json:"socket_options,omitempty"`
+ // Extra source addresses appended to the address specified in the `source_address`
+ // field. This enables to specify multiple source addresses. Currently, only one extra
+ // address can be supported, and the extra address should have a different IP version
+ // with the address in the `source_address` field. The address which has the same IP
+ // version with the target host's address IP version will be used as bind address. If more
+ // than one extra address specified, only the first address matched IP version will be
+ // returned. If there is no same IP version address found, the address in the `source_address`
+ // will be returned.
+ ExtraSourceAddresses []*ExtraSourceAddress `protobuf:"bytes,5,rep,name=extra_source_addresses,json=extraSourceAddresses,proto3" json:"extra_source_addresses,omitempty"`
+ // Deprecated by
+ // :ref:`extra_source_addresses `
+ //
+ // Deprecated: Do not use.
+ AdditionalSourceAddresses []*SocketAddress `protobuf:"bytes,4,rep,name=additional_source_addresses,json=additionalSourceAddresses,proto3" json:"additional_source_addresses,omitempty"`
+}
+
+func (x *BindConfig) Reset() {
+ *x = BindConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_address_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *BindConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BindConfig) ProtoMessage() {}
+
+func (x *BindConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_address_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BindConfig.ProtoReflect.Descriptor instead.
+func (*BindConfig) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *BindConfig) GetSourceAddress() *SocketAddress {
+ if x != nil {
+ return x.SourceAddress
+ }
+ return nil
+}
+
+func (x *BindConfig) GetFreebind() *wrapperspb.BoolValue {
+ if x != nil {
+ return x.Freebind
+ }
+ return nil
+}
+
+func (x *BindConfig) GetSocketOptions() []*SocketOption {
+ if x != nil {
+ return x.SocketOptions
+ }
+ return nil
+}
+
+func (x *BindConfig) GetExtraSourceAddresses() []*ExtraSourceAddress {
+ if x != nil {
+ return x.ExtraSourceAddresses
+ }
+ return nil
+}
+
+// Deprecated: Do not use.
+func (x *BindConfig) GetAdditionalSourceAddresses() []*SocketAddress {
+ if x != nil {
+ return x.AdditionalSourceAddresses
+ }
+ return nil
+}
+
+// Addresses specify either a logical or physical address and port, which are
+// used to tell Envoy where to bind/listen, connect to upstream and find
+// management servers.
+type Address struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Address:
+ //
+ // *Address_SocketAddress
+ // *Address_Pipe
+ // *Address_EnvoyInternalAddress
+ Address isAddress_Address `protobuf_oneof:"address"`
+}
+
+func (x *Address) Reset() {
+ *x = Address{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_address_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Address) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Address) ProtoMessage() {}
+
+func (x *Address) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_address_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Address.ProtoReflect.Descriptor instead.
+func (*Address) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{6}
+}
+
+func (m *Address) GetAddress() isAddress_Address {
+ if m != nil {
+ return m.Address
+ }
+ return nil
+}
+
+func (x *Address) GetSocketAddress() *SocketAddress {
+ if x, ok := x.GetAddress().(*Address_SocketAddress); ok {
+ return x.SocketAddress
+ }
+ return nil
+}
+
+func (x *Address) GetPipe() *Pipe {
+ if x, ok := x.GetAddress().(*Address_Pipe); ok {
+ return x.Pipe
+ }
+ return nil
+}
+
+func (x *Address) GetEnvoyInternalAddress() *EnvoyInternalAddress {
+ if x, ok := x.GetAddress().(*Address_EnvoyInternalAddress); ok {
+ return x.EnvoyInternalAddress
+ }
+ return nil
+}
+
+type isAddress_Address interface {
+ isAddress_Address()
+}
+
+type Address_SocketAddress struct {
+ SocketAddress *SocketAddress `protobuf:"bytes,1,opt,name=socket_address,json=socketAddress,proto3,oneof"`
+}
+
+type Address_Pipe struct {
+ Pipe *Pipe `protobuf:"bytes,2,opt,name=pipe,proto3,oneof"`
+}
+
+type Address_EnvoyInternalAddress struct {
+ // Specifies a user-space address handled by :ref:`internal listeners
+ // `.
+ EnvoyInternalAddress *EnvoyInternalAddress `protobuf:"bytes,3,opt,name=envoy_internal_address,json=envoyInternalAddress,proto3,oneof"`
+}
+
+func (*Address_SocketAddress) isAddress_Address() {}
+
+func (*Address_Pipe) isAddress_Address() {}
+
+func (*Address_EnvoyInternalAddress) isAddress_Address() {}
+
+// CidrRange specifies an IP Address and a prefix length to construct
+// the subnet mask for a `CIDR `_ range.
+type CidrRange struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // IPv4 or IPv6 address, e.g. “192.0.0.0“ or “2001:db8::“.
+ AddressPrefix string `protobuf:"bytes,1,opt,name=address_prefix,json=addressPrefix,proto3" json:"address_prefix,omitempty"`
+ // Length of prefix, e.g. 0, 32. Defaults to 0 when unset.
+ PrefixLen *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=prefix_len,json=prefixLen,proto3" json:"prefix_len,omitempty"`
+}
+
+func (x *CidrRange) Reset() {
+ *x = CidrRange{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_address_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CidrRange) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CidrRange) ProtoMessage() {}
+
+func (x *CidrRange) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_address_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CidrRange.ProtoReflect.Descriptor instead.
+func (*CidrRange) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_address_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *CidrRange) GetAddressPrefix() string {
+ if x != nil {
+ return x.AddressPrefix
+ }
+ return ""
+}
+
+func (x *CidrRange) GetPrefixLen() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.PrefixLen
+ }
+ return nil
+}
+
+var File_envoy_config_core_v3_address_proto protoreflect.FileDescriptor
+
+var file_envoy_config_core_v3_address_proto_rawDesc = []byte{
+ 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x28, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33,
+ 0x2f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f,
+ 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c,
+ 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x60, 0x0a, 0x04, 0x50, 0x69, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x04,
+ 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72,
+ 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1c, 0x0a, 0x04, 0x6d, 0x6f, 0x64,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x2a, 0x03, 0x18, 0xff,
+ 0x03, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x3a, 0x1d, 0x9a, 0xc5, 0x88, 0x1e, 0x18, 0x0a, 0x16,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72,
+ 0x65, 0x2e, 0x50, 0x69, 0x70, 0x65, 0x22, 0x8a, 0x01, 0x0a, 0x14, 0x45, 0x6e, 0x76, 0x6f, 0x79,
+ 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12,
+ 0x32, 0x0a, 0x14, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e,
+ 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52,
+ 0x12, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x4e,
+ 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f,
+ 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69,
+ 0x6e, 0x74, 0x49, 0x64, 0x42, 0x1d, 0x0a, 0x16, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f,
+ 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03,
+ 0xf8, 0x42, 0x01, 0x22, 0xf6, 0x02, 0x0a, 0x0d, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64,
+ 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x52, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f,
+ 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53,
+ 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x50, 0x72, 0x6f,
+ 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52,
+ 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x21, 0x0a, 0x07, 0x61, 0x64, 0x64,
+ 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72,
+ 0x02, 0x10, 0x01, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x2a, 0x0a, 0x0a,
+ 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d,
+ 0x42, 0x09, 0xfa, 0x42, 0x06, 0x2a, 0x04, 0x18, 0xff, 0xff, 0x03, 0x48, 0x00, 0x52, 0x09, 0x70,
+ 0x6f, 0x72, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x6e, 0x61, 0x6d, 0x65,
+ 0x64, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09,
+ 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73,
+ 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f,
+ 0x0a, 0x0b, 0x69, 0x70, 0x76, 0x34, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x18, 0x06, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x70, 0x76, 0x34, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x22,
+ 0x1c, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x07, 0x0a, 0x03, 0x54,
+ 0x43, 0x50, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x01, 0x3a, 0x26, 0x9a,
+ 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e,
+ 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64,
+ 0x64, 0x72, 0x65, 0x73, 0x73, 0x42, 0x15, 0x0a, 0x0e, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x70,
+ 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x90, 0x02, 0x0a,
+ 0x0c, 0x54, 0x63, 0x70, 0x4b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x12, 0x47, 0x0a,
+ 0x10, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x62, 0x65,
+ 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65,
+ 0x50, 0x72, 0x6f, 0x62, 0x65, 0x73, 0x12, 0x43, 0x0a, 0x0e, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c,
+ 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0d, 0x6b, 0x65,
+ 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x12, 0x6b,
+ 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61,
+ 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x11, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65,
+ 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a,
+ 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x54, 0x63, 0x70, 0x4b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x22,
+ 0xb1, 0x01, 0x0a, 0x12, 0x45, 0x78, 0x74, 0x72, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41,
+ 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x47, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73,
+ 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53,
+ 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x42, 0x08, 0xfa, 0x42,
+ 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12,
+ 0x52, 0x0a, 0x0e, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53,
+ 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4f, 0x76, 0x65, 0x72,
+ 0x72, 0x69, 0x64, 0x65, 0x52, 0x0d, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x22, 0xd2, 0x03, 0x0a, 0x0a, 0x42, 0x69, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x12, 0x4a, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x64, 0x64,
+ 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76,
+ 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52,
+ 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x36,
+ 0x0a, 0x08, 0x66, 0x72, 0x65, 0x65, 0x62, 0x69, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x66, 0x72,
+ 0x65, 0x65, 0x62, 0x69, 0x6e, 0x64, 0x12, 0x49, 0x0a, 0x0e, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74,
+ 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x12, 0x5e, 0x0a, 0x16, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x74, 0x72, 0x61, 0x53, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x14, 0x65, 0x78, 0x74,
+ 0x72, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65,
+ 0x73, 0x12, 0x70, 0x0a, 0x1b, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73,
+ 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f,
+ 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x42, 0x0b, 0x18, 0x01, 0x92,
+ 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x52, 0x19, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x61, 0x6c, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73,
+ 0x73, 0x65, 0x73, 0x3a, 0x23, 0x9a, 0xc5, 0x88, 0x1e, 0x1e, 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x42, 0x69,
+ 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x9f, 0x02, 0x0a, 0x07, 0x41, 0x64, 0x64,
+ 0x72, 0x65, 0x73, 0x73, 0x12, 0x4c, 0x0a, 0x0e, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x61,
+ 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65,
+ 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73,
+ 0x73, 0x48, 0x00, 0x52, 0x0d, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65,
+ 0x73, 0x73, 0x12, 0x30, 0x0a, 0x04, 0x70, 0x69, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
+ 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x69, 0x70, 0x65, 0x48, 0x00, 0x52, 0x04,
+ 0x70, 0x69, 0x70, 0x65, 0x12, 0x62, 0x0a, 0x16, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x5f, 0x69, 0x6e,
+ 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x6e, 0x76, 0x6f,
+ 0x79, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73,
+ 0x48, 0x00, 0x52, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61,
+ 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x3a, 0x20, 0x9a, 0xc5, 0x88, 0x1e, 0x1b, 0x0a,
+ 0x19, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x42, 0x0e, 0x0a, 0x07, 0x61, 0x64,
+ 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xa6, 0x01, 0x0a, 0x09, 0x43,
+ 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x0e, 0x61, 0x64, 0x64, 0x72,
+ 0x65, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0d, 0x61, 0x64, 0x64, 0x72, 0x65,
+ 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x45, 0x0a, 0x0a, 0x70, 0x72, 0x65, 0x66,
+ 0x69, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55,
+ 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x2a,
+ 0x03, 0x18, 0x80, 0x01, 0x52, 0x09, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x4c, 0x65, 0x6e, 0x3a,
+ 0x22, 0x9a, 0xc5, 0x88, 0x1e, 0x1d, 0x0a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70,
+ 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61,
+ 0x6e, 0x67, 0x65, 0x42, 0x80, 0x01, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x41, 0x64, 0x64, 0x72,
+ 0x65, 0x73, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68,
+ 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78,
+ 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61,
+ 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f,
+ 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0xba, 0x80,
+ 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_config_core_v3_address_proto_rawDescOnce sync.Once
+ file_envoy_config_core_v3_address_proto_rawDescData = file_envoy_config_core_v3_address_proto_rawDesc
+)
+
+func file_envoy_config_core_v3_address_proto_rawDescGZIP() []byte {
+ file_envoy_config_core_v3_address_proto_rawDescOnce.Do(func() {
+ file_envoy_config_core_v3_address_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_address_proto_rawDescData)
+ })
+ return file_envoy_config_core_v3_address_proto_rawDescData
+}
+
+var file_envoy_config_core_v3_address_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_envoy_config_core_v3_address_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
+var file_envoy_config_core_v3_address_proto_goTypes = []interface{}{
+ (SocketAddress_Protocol)(0), // 0: envoy.config.core.v3.SocketAddress.Protocol
+ (*Pipe)(nil), // 1: envoy.config.core.v3.Pipe
+ (*EnvoyInternalAddress)(nil), // 2: envoy.config.core.v3.EnvoyInternalAddress
+ (*SocketAddress)(nil), // 3: envoy.config.core.v3.SocketAddress
+ (*TcpKeepalive)(nil), // 4: envoy.config.core.v3.TcpKeepalive
+ (*ExtraSourceAddress)(nil), // 5: envoy.config.core.v3.ExtraSourceAddress
+ (*BindConfig)(nil), // 6: envoy.config.core.v3.BindConfig
+ (*Address)(nil), // 7: envoy.config.core.v3.Address
+ (*CidrRange)(nil), // 8: envoy.config.core.v3.CidrRange
+ (*wrapperspb.UInt32Value)(nil), // 9: google.protobuf.UInt32Value
+ (*SocketOptionsOverride)(nil), // 10: envoy.config.core.v3.SocketOptionsOverride
+ (*wrapperspb.BoolValue)(nil), // 11: google.protobuf.BoolValue
+ (*SocketOption)(nil), // 12: envoy.config.core.v3.SocketOption
+}
+var file_envoy_config_core_v3_address_proto_depIdxs = []int32{
+ 0, // 0: envoy.config.core.v3.SocketAddress.protocol:type_name -> envoy.config.core.v3.SocketAddress.Protocol
+ 9, // 1: envoy.config.core.v3.TcpKeepalive.keepalive_probes:type_name -> google.protobuf.UInt32Value
+ 9, // 2: envoy.config.core.v3.TcpKeepalive.keepalive_time:type_name -> google.protobuf.UInt32Value
+ 9, // 3: envoy.config.core.v3.TcpKeepalive.keepalive_interval:type_name -> google.protobuf.UInt32Value
+ 3, // 4: envoy.config.core.v3.ExtraSourceAddress.address:type_name -> envoy.config.core.v3.SocketAddress
+ 10, // 5: envoy.config.core.v3.ExtraSourceAddress.socket_options:type_name -> envoy.config.core.v3.SocketOptionsOverride
+ 3, // 6: envoy.config.core.v3.BindConfig.source_address:type_name -> envoy.config.core.v3.SocketAddress
+ 11, // 7: envoy.config.core.v3.BindConfig.freebind:type_name -> google.protobuf.BoolValue
+ 12, // 8: envoy.config.core.v3.BindConfig.socket_options:type_name -> envoy.config.core.v3.SocketOption
+ 5, // 9: envoy.config.core.v3.BindConfig.extra_source_addresses:type_name -> envoy.config.core.v3.ExtraSourceAddress
+ 3, // 10: envoy.config.core.v3.BindConfig.additional_source_addresses:type_name -> envoy.config.core.v3.SocketAddress
+ 3, // 11: envoy.config.core.v3.Address.socket_address:type_name -> envoy.config.core.v3.SocketAddress
+ 1, // 12: envoy.config.core.v3.Address.pipe:type_name -> envoy.config.core.v3.Pipe
+ 2, // 13: envoy.config.core.v3.Address.envoy_internal_address:type_name -> envoy.config.core.v3.EnvoyInternalAddress
+ 9, // 14: envoy.config.core.v3.CidrRange.prefix_len:type_name -> google.protobuf.UInt32Value
+ 15, // [15:15] is the sub-list for method output_type
+ 15, // [15:15] is the sub-list for method input_type
+ 15, // [15:15] is the sub-list for extension type_name
+ 15, // [15:15] is the sub-list for extension extendee
+ 0, // [0:15] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_core_v3_address_proto_init() }
+func file_envoy_config_core_v3_address_proto_init() {
+ if File_envoy_config_core_v3_address_proto != nil {
+ return
+ }
+ file_envoy_config_core_v3_socket_option_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_core_v3_address_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Pipe); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_address_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EnvoyInternalAddress); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_address_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SocketAddress); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_address_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TcpKeepalive); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_address_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ExtraSourceAddress); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_address_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BindConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_address_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Address); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_address_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CidrRange); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_envoy_config_core_v3_address_proto_msgTypes[1].OneofWrappers = []interface{}{
+ (*EnvoyInternalAddress_ServerListenerName)(nil),
+ }
+ file_envoy_config_core_v3_address_proto_msgTypes[2].OneofWrappers = []interface{}{
+ (*SocketAddress_PortValue)(nil),
+ (*SocketAddress_NamedPort)(nil),
+ }
+ file_envoy_config_core_v3_address_proto_msgTypes[6].OneofWrappers = []interface{}{
+ (*Address_SocketAddress)(nil),
+ (*Address_Pipe)(nil),
+ (*Address_EnvoyInternalAddress)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_core_v3_address_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 8,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_core_v3_address_proto_goTypes,
+ DependencyIndexes: file_envoy_config_core_v3_address_proto_depIdxs,
+ EnumInfos: file_envoy_config_core_v3_address_proto_enumTypes,
+ MessageInfos: file_envoy_config_core_v3_address_proto_msgTypes,
+ }.Build()
+ File_envoy_config_core_v3_address_proto = out.File
+ file_envoy_config_core_v3_address_proto_rawDesc = nil
+ file_envoy_config_core_v3_address_proto_goTypes = nil
+ file_envoy_config_core_v3_address_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/address.pb.validate.go b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/address.pb.validate.go
new file mode 100644
index 000000000..25610ab04
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/address.pb.validate.go
@@ -0,0 +1,1449 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/core/v3/address.proto
+
+package corev3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on Pipe with the rules defined in the proto
+// definition for this message. If any rules are violated, the first error
+// encountered is returned, or nil if there are no violations.
+func (m *Pipe) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Pipe with the rules defined in the
+// proto definition for this message. If any rules are violated, the result is
+// a list of violation errors wrapped in PipeMultiError, or nil if none found.
+func (m *Pipe) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Pipe) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if utf8.RuneCountInString(m.GetPath()) < 1 {
+ err := PipeValidationError{
+ field: "Path",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if m.GetMode() > 511 {
+ err := PipeValidationError{
+ field: "Mode",
+ reason: "value must be less than or equal to 511",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return PipeMultiError(errors)
+ }
+
+ return nil
+}
+
+// PipeMultiError is an error wrapping multiple validation errors returned by
+// Pipe.ValidateAll() if the designated constraints aren't met.
+type PipeMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m PipeMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m PipeMultiError) AllErrors() []error { return m }
+
+// PipeValidationError is the validation error returned by Pipe.Validate if the
+// designated constraints aren't met.
+type PipeValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e PipeValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e PipeValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e PipeValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e PipeValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e PipeValidationError) ErrorName() string { return "PipeValidationError" }
+
+// Error satisfies the builtin error interface
+func (e PipeValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sPipe.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = PipeValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = PipeValidationError{}
+
+// Validate checks the field values on EnvoyInternalAddress with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *EnvoyInternalAddress) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on EnvoyInternalAddress with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// EnvoyInternalAddressMultiError, or nil if none found.
+func (m *EnvoyInternalAddress) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *EnvoyInternalAddress) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for EndpointId
+
+ oneofAddressNameSpecifierPresent := false
+ switch v := m.AddressNameSpecifier.(type) {
+ case *EnvoyInternalAddress_ServerListenerName:
+ if v == nil {
+ err := EnvoyInternalAddressValidationError{
+ field: "AddressNameSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofAddressNameSpecifierPresent = true
+ // no validation rules for ServerListenerName
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofAddressNameSpecifierPresent {
+ err := EnvoyInternalAddressValidationError{
+ field: "AddressNameSpecifier",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return EnvoyInternalAddressMultiError(errors)
+ }
+
+ return nil
+}
+
+// EnvoyInternalAddressMultiError is an error wrapping multiple validation
+// errors returned by EnvoyInternalAddress.ValidateAll() if the designated
+// constraints aren't met.
+type EnvoyInternalAddressMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m EnvoyInternalAddressMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m EnvoyInternalAddressMultiError) AllErrors() []error { return m }
+
+// EnvoyInternalAddressValidationError is the validation error returned by
+// EnvoyInternalAddress.Validate if the designated constraints aren't met.
+type EnvoyInternalAddressValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e EnvoyInternalAddressValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e EnvoyInternalAddressValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e EnvoyInternalAddressValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e EnvoyInternalAddressValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e EnvoyInternalAddressValidationError) ErrorName() string {
+ return "EnvoyInternalAddressValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e EnvoyInternalAddressValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sEnvoyInternalAddress.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = EnvoyInternalAddressValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = EnvoyInternalAddressValidationError{}
+
+// Validate checks the field values on SocketAddress with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *SocketAddress) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on SocketAddress with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in SocketAddressMultiError, or
+// nil if none found.
+func (m *SocketAddress) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *SocketAddress) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if _, ok := SocketAddress_Protocol_name[int32(m.GetProtocol())]; !ok {
+ err := SocketAddressValidationError{
+ field: "Protocol",
+ reason: "value must be one of the defined enum values",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if utf8.RuneCountInString(m.GetAddress()) < 1 {
+ err := SocketAddressValidationError{
+ field: "Address",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ // no validation rules for ResolverName
+
+ // no validation rules for Ipv4Compat
+
+ oneofPortSpecifierPresent := false
+ switch v := m.PortSpecifier.(type) {
+ case *SocketAddress_PortValue:
+ if v == nil {
+ err := SocketAddressValidationError{
+ field: "PortSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofPortSpecifierPresent = true
+
+ if m.GetPortValue() > 65535 {
+ err := SocketAddressValidationError{
+ field: "PortValue",
+ reason: "value must be less than or equal to 65535",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ case *SocketAddress_NamedPort:
+ if v == nil {
+ err := SocketAddressValidationError{
+ field: "PortSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofPortSpecifierPresent = true
+ // no validation rules for NamedPort
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofPortSpecifierPresent {
+ err := SocketAddressValidationError{
+ field: "PortSpecifier",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return SocketAddressMultiError(errors)
+ }
+
+ return nil
+}
+
+// SocketAddressMultiError is an error wrapping multiple validation errors
+// returned by SocketAddress.ValidateAll() if the designated constraints
+// aren't met.
+type SocketAddressMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m SocketAddressMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m SocketAddressMultiError) AllErrors() []error { return m }
+
+// SocketAddressValidationError is the validation error returned by
+// SocketAddress.Validate if the designated constraints aren't met.
+type SocketAddressValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e SocketAddressValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e SocketAddressValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e SocketAddressValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e SocketAddressValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e SocketAddressValidationError) ErrorName() string { return "SocketAddressValidationError" }
+
+// Error satisfies the builtin error interface
+func (e SocketAddressValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sSocketAddress.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = SocketAddressValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = SocketAddressValidationError{}
+
+// Validate checks the field values on TcpKeepalive with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *TcpKeepalive) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on TcpKeepalive with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in TcpKeepaliveMultiError, or
+// nil if none found.
+func (m *TcpKeepalive) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *TcpKeepalive) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetKeepaliveProbes()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, TcpKeepaliveValidationError{
+ field: "KeepaliveProbes",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, TcpKeepaliveValidationError{
+ field: "KeepaliveProbes",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetKeepaliveProbes()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return TcpKeepaliveValidationError{
+ field: "KeepaliveProbes",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetKeepaliveTime()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, TcpKeepaliveValidationError{
+ field: "KeepaliveTime",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, TcpKeepaliveValidationError{
+ field: "KeepaliveTime",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetKeepaliveTime()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return TcpKeepaliveValidationError{
+ field: "KeepaliveTime",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetKeepaliveInterval()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, TcpKeepaliveValidationError{
+ field: "KeepaliveInterval",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, TcpKeepaliveValidationError{
+ field: "KeepaliveInterval",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetKeepaliveInterval()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return TcpKeepaliveValidationError{
+ field: "KeepaliveInterval",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return TcpKeepaliveMultiError(errors)
+ }
+
+ return nil
+}
+
+// TcpKeepaliveMultiError is an error wrapping multiple validation errors
+// returned by TcpKeepalive.ValidateAll() if the designated constraints aren't met.
+type TcpKeepaliveMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m TcpKeepaliveMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m TcpKeepaliveMultiError) AllErrors() []error { return m }
+
+// TcpKeepaliveValidationError is the validation error returned by
+// TcpKeepalive.Validate if the designated constraints aren't met.
+type TcpKeepaliveValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e TcpKeepaliveValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e TcpKeepaliveValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e TcpKeepaliveValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e TcpKeepaliveValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e TcpKeepaliveValidationError) ErrorName() string { return "TcpKeepaliveValidationError" }
+
+// Error satisfies the builtin error interface
+func (e TcpKeepaliveValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sTcpKeepalive.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = TcpKeepaliveValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = TcpKeepaliveValidationError{}
+
+// Validate checks the field values on ExtraSourceAddress with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *ExtraSourceAddress) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ExtraSourceAddress with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// ExtraSourceAddressMultiError, or nil if none found.
+func (m *ExtraSourceAddress) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ExtraSourceAddress) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if m.GetAddress() == nil {
+ err := ExtraSourceAddressValidationError{
+ field: "Address",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetAddress()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ExtraSourceAddressValidationError{
+ field: "Address",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ExtraSourceAddressValidationError{
+ field: "Address",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetAddress()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ExtraSourceAddressValidationError{
+ field: "Address",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetSocketOptions()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ExtraSourceAddressValidationError{
+ field: "SocketOptions",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ExtraSourceAddressValidationError{
+ field: "SocketOptions",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetSocketOptions()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ExtraSourceAddressValidationError{
+ field: "SocketOptions",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return ExtraSourceAddressMultiError(errors)
+ }
+
+ return nil
+}
+
+// ExtraSourceAddressMultiError is an error wrapping multiple validation errors
+// returned by ExtraSourceAddress.ValidateAll() if the designated constraints
+// aren't met.
+type ExtraSourceAddressMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ExtraSourceAddressMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ExtraSourceAddressMultiError) AllErrors() []error { return m }
+
+// ExtraSourceAddressValidationError is the validation error returned by
+// ExtraSourceAddress.Validate if the designated constraints aren't met.
+type ExtraSourceAddressValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ExtraSourceAddressValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ExtraSourceAddressValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ExtraSourceAddressValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ExtraSourceAddressValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ExtraSourceAddressValidationError) ErrorName() string {
+ return "ExtraSourceAddressValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ExtraSourceAddressValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sExtraSourceAddress.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ExtraSourceAddressValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ExtraSourceAddressValidationError{}
+
+// Validate checks the field values on BindConfig with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *BindConfig) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on BindConfig with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in BindConfigMultiError, or
+// nil if none found.
+func (m *BindConfig) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *BindConfig) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetSourceAddress()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BindConfigValidationError{
+ field: "SourceAddress",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BindConfigValidationError{
+ field: "SourceAddress",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetSourceAddress()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BindConfigValidationError{
+ field: "SourceAddress",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetFreebind()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BindConfigValidationError{
+ field: "Freebind",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BindConfigValidationError{
+ field: "Freebind",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetFreebind()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BindConfigValidationError{
+ field: "Freebind",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ for idx, item := range m.GetSocketOptions() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BindConfigValidationError{
+ field: fmt.Sprintf("SocketOptions[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BindConfigValidationError{
+ field: fmt.Sprintf("SocketOptions[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BindConfigValidationError{
+ field: fmt.Sprintf("SocketOptions[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetExtraSourceAddresses() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BindConfigValidationError{
+ field: fmt.Sprintf("ExtraSourceAddresses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BindConfigValidationError{
+ field: fmt.Sprintf("ExtraSourceAddresses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BindConfigValidationError{
+ field: fmt.Sprintf("ExtraSourceAddresses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetAdditionalSourceAddresses() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BindConfigValidationError{
+ field: fmt.Sprintf("AdditionalSourceAddresses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BindConfigValidationError{
+ field: fmt.Sprintf("AdditionalSourceAddresses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BindConfigValidationError{
+ field: fmt.Sprintf("AdditionalSourceAddresses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return BindConfigMultiError(errors)
+ }
+
+ return nil
+}
+
+// BindConfigMultiError is an error wrapping multiple validation errors
+// returned by BindConfig.ValidateAll() if the designated constraints aren't met.
+type BindConfigMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m BindConfigMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m BindConfigMultiError) AllErrors() []error { return m }
+
+// BindConfigValidationError is the validation error returned by
+// BindConfig.Validate if the designated constraints aren't met.
+type BindConfigValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e BindConfigValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e BindConfigValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e BindConfigValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e BindConfigValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e BindConfigValidationError) ErrorName() string { return "BindConfigValidationError" }
+
+// Error satisfies the builtin error interface
+func (e BindConfigValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sBindConfig.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = BindConfigValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = BindConfigValidationError{}
+
+// Validate checks the field values on Address with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *Address) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Address with the rules defined in the
+// proto definition for this message. If any rules are violated, the result is
+// a list of violation errors wrapped in AddressMultiError, or nil if none found.
+func (m *Address) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Address) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ oneofAddressPresent := false
+ switch v := m.Address.(type) {
+ case *Address_SocketAddress:
+ if v == nil {
+ err := AddressValidationError{
+ field: "Address",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofAddressPresent = true
+
+ if all {
+ switch v := interface{}(m.GetSocketAddress()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AddressValidationError{
+ field: "SocketAddress",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AddressValidationError{
+ field: "SocketAddress",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetSocketAddress()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AddressValidationError{
+ field: "SocketAddress",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *Address_Pipe:
+ if v == nil {
+ err := AddressValidationError{
+ field: "Address",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofAddressPresent = true
+
+ if all {
+ switch v := interface{}(m.GetPipe()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AddressValidationError{
+ field: "Pipe",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AddressValidationError{
+ field: "Pipe",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetPipe()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AddressValidationError{
+ field: "Pipe",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *Address_EnvoyInternalAddress:
+ if v == nil {
+ err := AddressValidationError{
+ field: "Address",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofAddressPresent = true
+
+ if all {
+ switch v := interface{}(m.GetEnvoyInternalAddress()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AddressValidationError{
+ field: "EnvoyInternalAddress",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AddressValidationError{
+ field: "EnvoyInternalAddress",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetEnvoyInternalAddress()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AddressValidationError{
+ field: "EnvoyInternalAddress",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofAddressPresent {
+ err := AddressValidationError{
+ field: "Address",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return AddressMultiError(errors)
+ }
+
+ return nil
+}
+
+// AddressMultiError is an error wrapping multiple validation errors returned
+// by Address.ValidateAll() if the designated constraints aren't met.
+type AddressMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m AddressMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m AddressMultiError) AllErrors() []error { return m }
+
+// AddressValidationError is the validation error returned by Address.Validate
+// if the designated constraints aren't met.
+type AddressValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e AddressValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e AddressValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e AddressValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e AddressValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e AddressValidationError) ErrorName() string { return "AddressValidationError" }
+
+// Error satisfies the builtin error interface
+func (e AddressValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sAddress.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = AddressValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = AddressValidationError{}
+
+// Validate checks the field values on CidrRange with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *CidrRange) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on CidrRange with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in CidrRangeMultiError, or nil
+// if none found.
+func (m *CidrRange) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *CidrRange) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if utf8.RuneCountInString(m.GetAddressPrefix()) < 1 {
+ err := CidrRangeValidationError{
+ field: "AddressPrefix",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if wrapper := m.GetPrefixLen(); wrapper != nil {
+
+ if wrapper.GetValue() > 128 {
+ err := CidrRangeValidationError{
+ field: "PrefixLen",
+ reason: "value must be less than or equal to 128",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return CidrRangeMultiError(errors)
+ }
+
+ return nil
+}
+
+// CidrRangeMultiError is an error wrapping multiple validation errors returned
+// by CidrRange.ValidateAll() if the designated constraints aren't met.
+type CidrRangeMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m CidrRangeMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m CidrRangeMultiError) AllErrors() []error { return m }
+
+// CidrRangeValidationError is the validation error returned by
+// CidrRange.Validate if the designated constraints aren't met.
+type CidrRangeValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e CidrRangeValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e CidrRangeValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e CidrRangeValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e CidrRangeValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e CidrRangeValidationError) ErrorName() string { return "CidrRangeValidationError" }
+
+// Error satisfies the builtin error interface
+func (e CidrRangeValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sCidrRange.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = CidrRangeValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = CidrRangeValidationError{}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/backoff.pb.go b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/backoff.pb.go
new file mode 100644
index 000000000..3502651b7
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/backoff.pb.go
@@ -0,0 +1,198 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.23.0
+// protoc v4.23.1
+// source: envoy/config/core/v3/backoff.proto
+
+package corev3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// Configuration defining a jittered exponential back off strategy.
+type BackoffStrategy struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The base interval to be used for the next back off computation. It should
+ // be greater than zero and less than or equal to :ref:`max_interval
+ // `.
+ BaseInterval *durationpb.Duration `protobuf:"bytes,1,opt,name=base_interval,json=baseInterval,proto3" json:"base_interval,omitempty"`
+ // Specifies the maximum interval between retries. This parameter is optional,
+ // but must be greater than or equal to the :ref:`base_interval
+ // ` if set. The default
+ // is 10 times the :ref:`base_interval
+ // `.
+ MaxInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=max_interval,json=maxInterval,proto3" json:"max_interval,omitempty"`
+}
+
+func (x *BackoffStrategy) Reset() {
+ *x = BackoffStrategy{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_backoff_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *BackoffStrategy) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BackoffStrategy) ProtoMessage() {}
+
+func (x *BackoffStrategy) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_backoff_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BackoffStrategy.ProtoReflect.Descriptor instead.
+func (*BackoffStrategy) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_backoff_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *BackoffStrategy) GetBaseInterval() *durationpb.Duration {
+ if x != nil {
+ return x.BaseInterval
+ }
+ return nil
+}
+
+func (x *BackoffStrategy) GetMaxInterval() *durationpb.Duration {
+ if x != nil {
+ return x.MaxInterval
+ }
+ return nil
+}
+
+var File_envoy_config_core_v3_backoff_proto protoreflect.FileDescriptor
+
+var file_envoy_config_core_v3_backoff_proto_rawDesc = []byte{
+ 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61,
+ 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61,
+ 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f,
+ 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61,
+ 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd3, 0x01, 0x0a, 0x0f, 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66,
+ 0x66, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x4e, 0x0a, 0x0d, 0x62, 0x61, 0x73,
+ 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0e, 0xfa, 0x42, 0x0b,
+ 0xaa, 0x01, 0x08, 0x08, 0x01, 0x32, 0x04, 0x10, 0xc0, 0x84, 0x3d, 0x52, 0x0c, 0x62, 0x61, 0x73,
+ 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x46, 0x0a, 0x0c, 0x6d, 0x61, 0x78,
+ 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa,
+ 0x01, 0x02, 0x2a, 0x00, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61,
+ 0x6c, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x42, 0x61, 0x63, 0x6b,
+ 0x6f, 0x66, 0x66, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x42, 0x80, 0x01, 0x0a, 0x22,
+ 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e,
+ 0x76, 0x33, 0x42, 0x0c, 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x50, 0x72, 0x6f, 0x74, 0x6f,
+ 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e,
+ 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b,
+ 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_config_core_v3_backoff_proto_rawDescOnce sync.Once
+ file_envoy_config_core_v3_backoff_proto_rawDescData = file_envoy_config_core_v3_backoff_proto_rawDesc
+)
+
+func file_envoy_config_core_v3_backoff_proto_rawDescGZIP() []byte {
+ file_envoy_config_core_v3_backoff_proto_rawDescOnce.Do(func() {
+ file_envoy_config_core_v3_backoff_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_backoff_proto_rawDescData)
+ })
+ return file_envoy_config_core_v3_backoff_proto_rawDescData
+}
+
+var file_envoy_config_core_v3_backoff_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_envoy_config_core_v3_backoff_proto_goTypes = []interface{}{
+ (*BackoffStrategy)(nil), // 0: envoy.config.core.v3.BackoffStrategy
+ (*durationpb.Duration)(nil), // 1: google.protobuf.Duration
+}
+var file_envoy_config_core_v3_backoff_proto_depIdxs = []int32{
+ 1, // 0: envoy.config.core.v3.BackoffStrategy.base_interval:type_name -> google.protobuf.Duration
+ 1, // 1: envoy.config.core.v3.BackoffStrategy.max_interval:type_name -> google.protobuf.Duration
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_core_v3_backoff_proto_init() }
+func file_envoy_config_core_v3_backoff_proto_init() {
+ if File_envoy_config_core_v3_backoff_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_core_v3_backoff_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BackoffStrategy); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_core_v3_backoff_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_core_v3_backoff_proto_goTypes,
+ DependencyIndexes: file_envoy_config_core_v3_backoff_proto_depIdxs,
+ MessageInfos: file_envoy_config_core_v3_backoff_proto_msgTypes,
+ }.Build()
+ File_envoy_config_core_v3_backoff_proto = out.File
+ file_envoy_config_core_v3_backoff_proto_rawDesc = nil
+ file_envoy_config_core_v3_backoff_proto_goTypes = nil
+ file_envoy_config_core_v3_backoff_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/backoff.pb.validate.go b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/backoff.pb.validate.go
new file mode 100644
index 000000000..c9b6590cc
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/backoff.pb.validate.go
@@ -0,0 +1,207 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/core/v3/backoff.proto
+
+package corev3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on BackoffStrategy with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *BackoffStrategy) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on BackoffStrategy with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// BackoffStrategyMultiError, or nil if none found.
+func (m *BackoffStrategy) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *BackoffStrategy) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if m.GetBaseInterval() == nil {
+ err := BackoffStrategyValidationError{
+ field: "BaseInterval",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if d := m.GetBaseInterval(); d != nil {
+ dur, err := d.AsDuration(), d.CheckValid()
+ if err != nil {
+ err = BackoffStrategyValidationError{
+ field: "BaseInterval",
+ reason: "value is not a valid duration",
+ cause: err,
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ } else {
+
+ gte := time.Duration(0*time.Second + 1000000*time.Nanosecond)
+
+ if dur < gte {
+ err := BackoffStrategyValidationError{
+ field: "BaseInterval",
+ reason: "value must be greater than or equal to 1ms",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+ }
+
+ if d := m.GetMaxInterval(); d != nil {
+ dur, err := d.AsDuration(), d.CheckValid()
+ if err != nil {
+ err = BackoffStrategyValidationError{
+ field: "MaxInterval",
+ reason: "value is not a valid duration",
+ cause: err,
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ } else {
+
+ gt := time.Duration(0*time.Second + 0*time.Nanosecond)
+
+ if dur <= gt {
+ err := BackoffStrategyValidationError{
+ field: "MaxInterval",
+ reason: "value must be greater than 0s",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+ }
+
+ if len(errors) > 0 {
+ return BackoffStrategyMultiError(errors)
+ }
+
+ return nil
+}
+
+// BackoffStrategyMultiError is an error wrapping multiple validation errors
+// returned by BackoffStrategy.ValidateAll() if the designated constraints
+// aren't met.
+type BackoffStrategyMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m BackoffStrategyMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m BackoffStrategyMultiError) AllErrors() []error { return m }
+
+// BackoffStrategyValidationError is the validation error returned by
+// BackoffStrategy.Validate if the designated constraints aren't met.
+type BackoffStrategyValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e BackoffStrategyValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e BackoffStrategyValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e BackoffStrategyValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e BackoffStrategyValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e BackoffStrategyValidationError) ErrorName() string { return "BackoffStrategyValidationError" }
+
+// Error satisfies the builtin error interface
+func (e BackoffStrategyValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sBackoffStrategy.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = BackoffStrategyValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = BackoffStrategyValidationError{}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/base.pb.go b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/base.pb.go
new file mode 100644
index 000000000..de869d950
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/base.pb.go
@@ -0,0 +1,2636 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.23.0
+// protoc v4.23.1
+// source: envoy/config/core/v3/base.proto
+
+package corev3
+
+import (
+ _ "github.com/cilium/proxy/go/envoy/annotations"
+ v3 "github.com/cilium/proxy/go/envoy/type/v3"
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ v31 "github.com/cncf/xds/go/xds/core/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// Envoy supports :ref:`upstream priority routing
+// ` both at the route and the virtual
+// cluster level. The current priority implementation uses different connection
+// pool and circuit breaking settings for each priority level. This means that
+// even for HTTP/2 requests, two physical connections will be used to an
+// upstream host. In the future Envoy will likely support true HTTP/2 priority
+// over a single upstream connection.
+type RoutingPriority int32
+
+const (
+ RoutingPriority_DEFAULT RoutingPriority = 0
+ RoutingPriority_HIGH RoutingPriority = 1
+)
+
+// Enum value maps for RoutingPriority.
+var (
+ RoutingPriority_name = map[int32]string{
+ 0: "DEFAULT",
+ 1: "HIGH",
+ }
+ RoutingPriority_value = map[string]int32{
+ "DEFAULT": 0,
+ "HIGH": 1,
+ }
+)
+
+func (x RoutingPriority) Enum() *RoutingPriority {
+ p := new(RoutingPriority)
+ *p = x
+ return p
+}
+
+func (x RoutingPriority) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (RoutingPriority) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_core_v3_base_proto_enumTypes[0].Descriptor()
+}
+
+func (RoutingPriority) Type() protoreflect.EnumType {
+ return &file_envoy_config_core_v3_base_proto_enumTypes[0]
+}
+
+func (x RoutingPriority) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use RoutingPriority.Descriptor instead.
+func (RoutingPriority) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{0}
+}
+
+// HTTP request method.
+type RequestMethod int32
+
+const (
+ RequestMethod_METHOD_UNSPECIFIED RequestMethod = 0
+ RequestMethod_GET RequestMethod = 1
+ RequestMethod_HEAD RequestMethod = 2
+ RequestMethod_POST RequestMethod = 3
+ RequestMethod_PUT RequestMethod = 4
+ RequestMethod_DELETE RequestMethod = 5
+ RequestMethod_CONNECT RequestMethod = 6
+ RequestMethod_OPTIONS RequestMethod = 7
+ RequestMethod_TRACE RequestMethod = 8
+ RequestMethod_PATCH RequestMethod = 9
+)
+
+// Enum value maps for RequestMethod.
+var (
+ RequestMethod_name = map[int32]string{
+ 0: "METHOD_UNSPECIFIED",
+ 1: "GET",
+ 2: "HEAD",
+ 3: "POST",
+ 4: "PUT",
+ 5: "DELETE",
+ 6: "CONNECT",
+ 7: "OPTIONS",
+ 8: "TRACE",
+ 9: "PATCH",
+ }
+ RequestMethod_value = map[string]int32{
+ "METHOD_UNSPECIFIED": 0,
+ "GET": 1,
+ "HEAD": 2,
+ "POST": 3,
+ "PUT": 4,
+ "DELETE": 5,
+ "CONNECT": 6,
+ "OPTIONS": 7,
+ "TRACE": 8,
+ "PATCH": 9,
+ }
+)
+
+func (x RequestMethod) Enum() *RequestMethod {
+ p := new(RequestMethod)
+ *p = x
+ return p
+}
+
+func (x RequestMethod) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (RequestMethod) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_core_v3_base_proto_enumTypes[1].Descriptor()
+}
+
+func (RequestMethod) Type() protoreflect.EnumType {
+ return &file_envoy_config_core_v3_base_proto_enumTypes[1]
+}
+
+func (x RequestMethod) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use RequestMethod.Descriptor instead.
+func (RequestMethod) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{1}
+}
+
+// Identifies the direction of the traffic relative to the local Envoy.
+type TrafficDirection int32
+
+const (
+ // Default option is unspecified.
+ TrafficDirection_UNSPECIFIED TrafficDirection = 0
+ // The transport is used for incoming traffic.
+ TrafficDirection_INBOUND TrafficDirection = 1
+ // The transport is used for outgoing traffic.
+ TrafficDirection_OUTBOUND TrafficDirection = 2
+)
+
+// Enum value maps for TrafficDirection.
+var (
+ TrafficDirection_name = map[int32]string{
+ 0: "UNSPECIFIED",
+ 1: "INBOUND",
+ 2: "OUTBOUND",
+ }
+ TrafficDirection_value = map[string]int32{
+ "UNSPECIFIED": 0,
+ "INBOUND": 1,
+ "OUTBOUND": 2,
+ }
+)
+
+func (x TrafficDirection) Enum() *TrafficDirection {
+ p := new(TrafficDirection)
+ *p = x
+ return p
+}
+
+func (x TrafficDirection) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (TrafficDirection) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_core_v3_base_proto_enumTypes[2].Descriptor()
+}
+
+func (TrafficDirection) Type() protoreflect.EnumType {
+ return &file_envoy_config_core_v3_base_proto_enumTypes[2]
+}
+
+func (x TrafficDirection) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use TrafficDirection.Descriptor instead.
+func (TrafficDirection) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{2}
+}
+
+// Describes the supported actions types for header append action.
+type HeaderValueOption_HeaderAppendAction int32
+
+const (
+ // This action will append the specified value to the existing values if the header
+ // already exists. If the header doesn't exist then this will add the header with
+ // specified key and value.
+ HeaderValueOption_APPEND_IF_EXISTS_OR_ADD HeaderValueOption_HeaderAppendAction = 0
+ // This action will add the header if it doesn't already exist. If the header
+ // already exists then this will be a no-op.
+ HeaderValueOption_ADD_IF_ABSENT HeaderValueOption_HeaderAppendAction = 1
+ // This action will overwrite the specified value by discarding any existing values if
+ // the header already exists. If the header doesn't exist then this will add the header
+ // with specified key and value.
+ HeaderValueOption_OVERWRITE_IF_EXISTS_OR_ADD HeaderValueOption_HeaderAppendAction = 2
+ // This action will overwrite the specified value by discarding any existing values if
+ // the header already exists. If the header doesn't exist then this will be no-op.
+ HeaderValueOption_OVERWRITE_IF_EXISTS HeaderValueOption_HeaderAppendAction = 3
+)
+
+// Enum value maps for HeaderValueOption_HeaderAppendAction.
+var (
+ HeaderValueOption_HeaderAppendAction_name = map[int32]string{
+ 0: "APPEND_IF_EXISTS_OR_ADD",
+ 1: "ADD_IF_ABSENT",
+ 2: "OVERWRITE_IF_EXISTS_OR_ADD",
+ 3: "OVERWRITE_IF_EXISTS",
+ }
+ HeaderValueOption_HeaderAppendAction_value = map[string]int32{
+ "APPEND_IF_EXISTS_OR_ADD": 0,
+ "ADD_IF_ABSENT": 1,
+ "OVERWRITE_IF_EXISTS_OR_ADD": 2,
+ "OVERWRITE_IF_EXISTS": 3,
+ }
+)
+
+func (x HeaderValueOption_HeaderAppendAction) Enum() *HeaderValueOption_HeaderAppendAction {
+ p := new(HeaderValueOption_HeaderAppendAction)
+ *p = x
+ return p
+}
+
+func (x HeaderValueOption_HeaderAppendAction) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (HeaderValueOption_HeaderAppendAction) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_core_v3_base_proto_enumTypes[3].Descriptor()
+}
+
+func (HeaderValueOption_HeaderAppendAction) Type() protoreflect.EnumType {
+ return &file_envoy_config_core_v3_base_proto_enumTypes[3]
+}
+
+func (x HeaderValueOption_HeaderAppendAction) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use HeaderValueOption_HeaderAppendAction.Descriptor instead.
+func (HeaderValueOption_HeaderAppendAction) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{11, 0}
+}
+
+// Identifies location of where either Envoy runs or where upstream hosts run.
+type Locality struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Region this :ref:`zone ` belongs to.
+ Region string `protobuf:"bytes,1,opt,name=region,proto3" json:"region,omitempty"`
+ // Defines the local service zone where Envoy is running. Though optional, it
+ // should be set if discovery service routing is used and the discovery
+ // service exposes :ref:`zone data `,
+ // either in this message or via :option:`--service-zone`. The meaning of zone
+ // is context dependent, e.g. `Availability Zone (AZ)
+ // `_
+ // on AWS, `Zone `_ on
+ // GCP, etc.
+ Zone string `protobuf:"bytes,2,opt,name=zone,proto3" json:"zone,omitempty"`
+ // When used for locality of upstream hosts, this field further splits zone
+ // into smaller chunks of sub-zones so they can be load balanced
+ // independently.
+ SubZone string `protobuf:"bytes,3,opt,name=sub_zone,json=subZone,proto3" json:"sub_zone,omitempty"`
+}
+
+func (x *Locality) Reset() {
+ *x = Locality{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Locality) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Locality) ProtoMessage() {}
+
+func (x *Locality) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Locality.ProtoReflect.Descriptor instead.
+func (*Locality) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Locality) GetRegion() string {
+ if x != nil {
+ return x.Region
+ }
+ return ""
+}
+
+func (x *Locality) GetZone() string {
+ if x != nil {
+ return x.Zone
+ }
+ return ""
+}
+
+func (x *Locality) GetSubZone() string {
+ if x != nil {
+ return x.SubZone
+ }
+ return ""
+}
+
+// BuildVersion combines SemVer version of extension with free-form build information
+// (i.e. 'alpha', 'private-build') as a set of strings.
+type BuildVersion struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // SemVer version of extension.
+ Version *v3.SemanticVersion `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
+ // Free-form build information.
+ // Envoy defines several well known keys in the source/common/version/version.h file
+ Metadata *structpb.Struct `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"`
+}
+
+func (x *BuildVersion) Reset() {
+ *x = BuildVersion{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *BuildVersion) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BuildVersion) ProtoMessage() {}
+
+func (x *BuildVersion) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BuildVersion.ProtoReflect.Descriptor instead.
+func (*BuildVersion) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *BuildVersion) GetVersion() *v3.SemanticVersion {
+ if x != nil {
+ return x.Version
+ }
+ return nil
+}
+
+func (x *BuildVersion) GetMetadata() *structpb.Struct {
+ if x != nil {
+ return x.Metadata
+ }
+ return nil
+}
+
+// Version and identification for an Envoy extension.
+// [#next-free-field: 7]
+type Extension struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // This is the name of the Envoy filter as specified in the Envoy
+ // configuration, e.g. envoy.filters.http.router, com.acme.widget.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Category of the extension.
+ // Extension category names use reverse DNS notation. For instance "envoy.filters.listener"
+ // for Envoy's built-in listener filters or "com.acme.filters.http" for HTTP filters from
+ // acme.com vendor.
+ // [#comment:TODO(yanavlasov): Link to the doc with existing envoy category names.]
+ Category string `protobuf:"bytes,2,opt,name=category,proto3" json:"category,omitempty"`
+ // [#not-implemented-hide:] Type descriptor of extension configuration proto.
+ // [#comment:TODO(yanavlasov): Link to the doc with existing configuration protos.]
+ // [#comment:TODO(yanavlasov): Add tests when PR #9391 lands.]
+ //
+ // Deprecated: Do not use.
+ TypeDescriptor string `protobuf:"bytes,3,opt,name=type_descriptor,json=typeDescriptor,proto3" json:"type_descriptor,omitempty"`
+ // The version is a property of the extension and maintained independently
+ // of other extensions and the Envoy API.
+ // This field is not set when extension did not provide version information.
+ Version *BuildVersion `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"`
+ // Indicates that the extension is present but was disabled via dynamic configuration.
+ Disabled bool `protobuf:"varint,5,opt,name=disabled,proto3" json:"disabled,omitempty"`
+ // Type URLs of extension configuration protos.
+ TypeUrls []string `protobuf:"bytes,6,rep,name=type_urls,json=typeUrls,proto3" json:"type_urls,omitempty"`
+}
+
+func (x *Extension) Reset() {
+ *x = Extension{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Extension) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Extension) ProtoMessage() {}
+
+func (x *Extension) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Extension.ProtoReflect.Descriptor instead.
+func (*Extension) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *Extension) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Extension) GetCategory() string {
+ if x != nil {
+ return x.Category
+ }
+ return ""
+}
+
+// Deprecated: Do not use.
+func (x *Extension) GetTypeDescriptor() string {
+ if x != nil {
+ return x.TypeDescriptor
+ }
+ return ""
+}
+
+func (x *Extension) GetVersion() *BuildVersion {
+ if x != nil {
+ return x.Version
+ }
+ return nil
+}
+
+func (x *Extension) GetDisabled() bool {
+ if x != nil {
+ return x.Disabled
+ }
+ return false
+}
+
+func (x *Extension) GetTypeUrls() []string {
+ if x != nil {
+ return x.TypeUrls
+ }
+ return nil
+}
+
+// Identifies a specific Envoy instance. The node identifier is presented to the
+// management server, which may use this identifier to distinguish per Envoy
+// configuration for serving.
+// [#next-free-field: 13]
+type Node struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // An opaque node identifier for the Envoy node. This also provides the local
+ // service node name. It should be set if any of the following features are
+ // used: :ref:`statsd `, :ref:`CDS
+ // `, and :ref:`HTTP tracing
+ // `, either in this message or via
+ // :option:`--service-node`.
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ // Defines the local service cluster name where Envoy is running. Though
+ // optional, it should be set if any of the following features are used:
+ // :ref:`statsd `, :ref:`health check cluster
+ // verification
+ // `,
+ // :ref:`runtime override directory `,
+ // :ref:`user agent addition
+ // `,
+ // :ref:`HTTP global rate limiting `,
+ // :ref:`CDS `, and :ref:`HTTP tracing
+ // `, either in this message or via
+ // :option:`--service-cluster`.
+ Cluster string `protobuf:"bytes,2,opt,name=cluster,proto3" json:"cluster,omitempty"`
+ // Opaque metadata extending the node identifier. Envoy will pass this
+ // directly to the management server.
+ Metadata *structpb.Struct `protobuf:"bytes,3,opt,name=metadata,proto3" json:"metadata,omitempty"`
+ // Map from xDS resource type URL to dynamic context parameters. These may vary at runtime (unlike
+ // other fields in this message). For example, the xDS client may have a shard identifier that
+ // changes during the lifetime of the xDS client. In Envoy, this would be achieved by updating the
+ // dynamic context on the Server::Instance's LocalInfo context provider. The shard ID dynamic
+ // parameter then appears in this field during future discovery requests.
+ DynamicParameters map[string]*v31.ContextParams `protobuf:"bytes,12,rep,name=dynamic_parameters,json=dynamicParameters,proto3" json:"dynamic_parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // Locality specifying where the Envoy instance is running.
+ Locality *Locality `protobuf:"bytes,4,opt,name=locality,proto3" json:"locality,omitempty"`
+ // Free-form string that identifies the entity requesting config.
+ // E.g. "envoy" or "grpc"
+ UserAgentName string `protobuf:"bytes,6,opt,name=user_agent_name,json=userAgentName,proto3" json:"user_agent_name,omitempty"`
+ // Types that are assignable to UserAgentVersionType:
+ //
+ // *Node_UserAgentVersion
+ // *Node_UserAgentBuildVersion
+ UserAgentVersionType isNode_UserAgentVersionType `protobuf_oneof:"user_agent_version_type"`
+ // List of extensions and their versions supported by the node.
+ Extensions []*Extension `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty"`
+ // Client feature support list. These are well known features described
+ // in the Envoy API repository for a given major version of an API. Client features
+ // use reverse DNS naming scheme, for example “com.acme.feature“.
+ // See :ref:`the list of features ` that xDS client may
+ // support.
+ ClientFeatures []string `protobuf:"bytes,10,rep,name=client_features,json=clientFeatures,proto3" json:"client_features,omitempty"`
+ // Known listening ports on the node as a generic hint to the management server
+ // for filtering :ref:`listeners ` to be returned. For example,
+ // if there is a listener bound to port 80, the list can optionally contain the
+ // SocketAddress “(0.0.0.0,80)“. The field is optional and just a hint.
+ //
+ // Deprecated: Do not use.
+ ListeningAddresses []*Address `protobuf:"bytes,11,rep,name=listening_addresses,json=listeningAddresses,proto3" json:"listening_addresses,omitempty"`
+}
+
+func (x *Node) Reset() {
+ *x = Node{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Node) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Node) ProtoMessage() {}
+
+func (x *Node) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Node.ProtoReflect.Descriptor instead.
+func (*Node) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *Node) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+func (x *Node) GetCluster() string {
+ if x != nil {
+ return x.Cluster
+ }
+ return ""
+}
+
+func (x *Node) GetMetadata() *structpb.Struct {
+ if x != nil {
+ return x.Metadata
+ }
+ return nil
+}
+
+func (x *Node) GetDynamicParameters() map[string]*v31.ContextParams {
+ if x != nil {
+ return x.DynamicParameters
+ }
+ return nil
+}
+
+func (x *Node) GetLocality() *Locality {
+ if x != nil {
+ return x.Locality
+ }
+ return nil
+}
+
+func (x *Node) GetUserAgentName() string {
+ if x != nil {
+ return x.UserAgentName
+ }
+ return ""
+}
+
+func (m *Node) GetUserAgentVersionType() isNode_UserAgentVersionType {
+ if m != nil {
+ return m.UserAgentVersionType
+ }
+ return nil
+}
+
+func (x *Node) GetUserAgentVersion() string {
+ if x, ok := x.GetUserAgentVersionType().(*Node_UserAgentVersion); ok {
+ return x.UserAgentVersion
+ }
+ return ""
+}
+
+func (x *Node) GetUserAgentBuildVersion() *BuildVersion {
+ if x, ok := x.GetUserAgentVersionType().(*Node_UserAgentBuildVersion); ok {
+ return x.UserAgentBuildVersion
+ }
+ return nil
+}
+
+func (x *Node) GetExtensions() []*Extension {
+ if x != nil {
+ return x.Extensions
+ }
+ return nil
+}
+
+func (x *Node) GetClientFeatures() []string {
+ if x != nil {
+ return x.ClientFeatures
+ }
+ return nil
+}
+
+// Deprecated: Do not use.
+func (x *Node) GetListeningAddresses() []*Address {
+ if x != nil {
+ return x.ListeningAddresses
+ }
+ return nil
+}
+
+type isNode_UserAgentVersionType interface {
+ isNode_UserAgentVersionType()
+}
+
+type Node_UserAgentVersion struct {
+ // Free-form string that identifies the version of the entity requesting config.
+ // E.g. "1.12.2" or "abcd1234", or "SpecialEnvoyBuild"
+ UserAgentVersion string `protobuf:"bytes,7,opt,name=user_agent_version,json=userAgentVersion,proto3,oneof"`
+}
+
+type Node_UserAgentBuildVersion struct {
+ // Structured version of the entity requesting config.
+ UserAgentBuildVersion *BuildVersion `protobuf:"bytes,8,opt,name=user_agent_build_version,json=userAgentBuildVersion,proto3,oneof"`
+}
+
+func (*Node_UserAgentVersion) isNode_UserAgentVersionType() {}
+
+func (*Node_UserAgentBuildVersion) isNode_UserAgentVersionType() {}
+
+// Metadata provides additional inputs to filters based on matched listeners,
+// filter chains, routes and endpoints. It is structured as a map, usually from
+// filter name (in reverse DNS format) to metadata specific to the filter. Metadata
+// key-values for a filter are merged as connection and request handling occurs,
+// with later values for the same key overriding earlier values.
+//
+// An example use of metadata is providing additional values to
+// http_connection_manager in the envoy.http_connection_manager.access_log
+// namespace.
+//
+// Another example use of metadata is to per service config info in cluster metadata, which may get
+// consumed by multiple filters.
+//
+// For load balancing, Metadata provides a means to subset cluster endpoints.
+// Endpoints have a Metadata object associated and routes contain a Metadata
+// object to match against. There are some well defined metadata used today for
+// this purpose:
+//
+// - “{"envoy.lb": {"canary": }}“ This indicates the canary status of an
+// endpoint and is also used during header processing
+// (x-envoy-upstream-canary) and for stats purposes.
+//
+// [#next-major-version: move to type/metadata/v2]
+type Metadata struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Key is the reverse DNS filter name, e.g. com.acme.widget. The “envoy.*“
+ // namespace is reserved for Envoy's built-in filters.
+ // If both “filter_metadata“ and
+ // :ref:`typed_filter_metadata `
+ // fields are present in the metadata with same keys,
+ // only “typed_filter_metadata“ field will be parsed.
+ FilterMetadata map[string]*structpb.Struct `protobuf:"bytes,1,rep,name=filter_metadata,json=filterMetadata,proto3" json:"filter_metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // Key is the reverse DNS filter name, e.g. com.acme.widget. The “envoy.*“
+ // namespace is reserved for Envoy's built-in filters.
+ // The value is encoded as google.protobuf.Any.
+ // If both :ref:`filter_metadata `
+ // and “typed_filter_metadata“ fields are present in the metadata with same keys,
+ // only “typed_filter_metadata“ field will be parsed.
+ TypedFilterMetadata map[string]*anypb.Any `protobuf:"bytes,2,rep,name=typed_filter_metadata,json=typedFilterMetadata,proto3" json:"typed_filter_metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *Metadata) Reset() {
+ *x = Metadata{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Metadata) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Metadata) ProtoMessage() {}
+
+func (x *Metadata) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Metadata.ProtoReflect.Descriptor instead.
+func (*Metadata) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *Metadata) GetFilterMetadata() map[string]*structpb.Struct {
+ if x != nil {
+ return x.FilterMetadata
+ }
+ return nil
+}
+
+func (x *Metadata) GetTypedFilterMetadata() map[string]*anypb.Any {
+ if x != nil {
+ return x.TypedFilterMetadata
+ }
+ return nil
+}
+
+// Runtime derived uint32 with a default when not specified.
+type RuntimeUInt32 struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Default value if runtime value is not available.
+ DefaultValue uint32 `protobuf:"varint,2,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"`
+ // Runtime key to get value for comparison. This value is used if defined.
+ RuntimeKey string `protobuf:"bytes,3,opt,name=runtime_key,json=runtimeKey,proto3" json:"runtime_key,omitempty"`
+}
+
+func (x *RuntimeUInt32) Reset() {
+ *x = RuntimeUInt32{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RuntimeUInt32) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RuntimeUInt32) ProtoMessage() {}
+
+func (x *RuntimeUInt32) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RuntimeUInt32.ProtoReflect.Descriptor instead.
+func (*RuntimeUInt32) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *RuntimeUInt32) GetDefaultValue() uint32 {
+ if x != nil {
+ return x.DefaultValue
+ }
+ return 0
+}
+
+func (x *RuntimeUInt32) GetRuntimeKey() string {
+ if x != nil {
+ return x.RuntimeKey
+ }
+ return ""
+}
+
+// Runtime derived percentage with a default when not specified.
+type RuntimePercent struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Default value if runtime value is not available.
+ DefaultValue *v3.Percent `protobuf:"bytes,1,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"`
+ // Runtime key to get value for comparison. This value is used if defined.
+ RuntimeKey string `protobuf:"bytes,2,opt,name=runtime_key,json=runtimeKey,proto3" json:"runtime_key,omitempty"`
+}
+
+func (x *RuntimePercent) Reset() {
+ *x = RuntimePercent{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RuntimePercent) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RuntimePercent) ProtoMessage() {}
+
+func (x *RuntimePercent) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RuntimePercent.ProtoReflect.Descriptor instead.
+func (*RuntimePercent) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *RuntimePercent) GetDefaultValue() *v3.Percent {
+ if x != nil {
+ return x.DefaultValue
+ }
+ return nil
+}
+
+func (x *RuntimePercent) GetRuntimeKey() string {
+ if x != nil {
+ return x.RuntimeKey
+ }
+ return ""
+}
+
+// Runtime derived double with a default when not specified.
+type RuntimeDouble struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Default value if runtime value is not available.
+ DefaultValue float64 `protobuf:"fixed64,1,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"`
+ // Runtime key to get value for comparison. This value is used if defined.
+ RuntimeKey string `protobuf:"bytes,2,opt,name=runtime_key,json=runtimeKey,proto3" json:"runtime_key,omitempty"`
+}
+
+func (x *RuntimeDouble) Reset() {
+ *x = RuntimeDouble{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RuntimeDouble) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RuntimeDouble) ProtoMessage() {}
+
+func (x *RuntimeDouble) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RuntimeDouble.ProtoReflect.Descriptor instead.
+func (*RuntimeDouble) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *RuntimeDouble) GetDefaultValue() float64 {
+ if x != nil {
+ return x.DefaultValue
+ }
+ return 0
+}
+
+func (x *RuntimeDouble) GetRuntimeKey() string {
+ if x != nil {
+ return x.RuntimeKey
+ }
+ return ""
+}
+
+// Runtime derived bool with a default when not specified.
+type RuntimeFeatureFlag struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Default value if runtime value is not available.
+ DefaultValue *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"`
+ // Runtime key to get value for comparison. This value is used if defined. The boolean value must
+ // be represented via its
+ // `canonical JSON encoding `_.
+ RuntimeKey string `protobuf:"bytes,2,opt,name=runtime_key,json=runtimeKey,proto3" json:"runtime_key,omitempty"`
+}
+
+func (x *RuntimeFeatureFlag) Reset() {
+ *x = RuntimeFeatureFlag{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RuntimeFeatureFlag) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RuntimeFeatureFlag) ProtoMessage() {}
+
+func (x *RuntimeFeatureFlag) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RuntimeFeatureFlag.ProtoReflect.Descriptor instead.
+func (*RuntimeFeatureFlag) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *RuntimeFeatureFlag) GetDefaultValue() *wrapperspb.BoolValue {
+ if x != nil {
+ return x.DefaultValue
+ }
+ return nil
+}
+
+func (x *RuntimeFeatureFlag) GetRuntimeKey() string {
+ if x != nil {
+ return x.RuntimeKey
+ }
+ return ""
+}
+
+// Query parameter name/value pair.
+type QueryParameter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The key of the query parameter. Case sensitive.
+ Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // The value of the query parameter.
+ Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *QueryParameter) Reset() {
+ *x = QueryParameter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *QueryParameter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QueryParameter) ProtoMessage() {}
+
+func (x *QueryParameter) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use QueryParameter.ProtoReflect.Descriptor instead.
+func (*QueryParameter) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *QueryParameter) GetKey() string {
+ if x != nil {
+ return x.Key
+ }
+ return ""
+}
+
+func (x *QueryParameter) GetValue() string {
+ if x != nil {
+ return x.Value
+ }
+ return ""
+}
+
+// Header name/value pair.
+type HeaderValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Header name.
+ Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // Header value.
+ //
+ // The same :ref:`format specifier ` as used for
+ // :ref:`HTTP access logging ` applies here, however
+ // unknown header values are replaced with the empty string instead of “-“.
+ // Header value is encoded as string. This does not work for non-utf8 characters.
+ // Only one of “value“ or “raw_value“ can be set.
+ Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ // Header value is encoded as bytes which can support non-utf8 characters.
+ // Only one of “value“ or “raw_value“ can be set.
+ RawValue []byte `protobuf:"bytes,3,opt,name=raw_value,json=rawValue,proto3" json:"raw_value,omitempty"`
+}
+
+func (x *HeaderValue) Reset() {
+ *x = HeaderValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HeaderValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HeaderValue) ProtoMessage() {}
+
+func (x *HeaderValue) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HeaderValue.ProtoReflect.Descriptor instead.
+func (*HeaderValue) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *HeaderValue) GetKey() string {
+ if x != nil {
+ return x.Key
+ }
+ return ""
+}
+
+func (x *HeaderValue) GetValue() string {
+ if x != nil {
+ return x.Value
+ }
+ return ""
+}
+
+func (x *HeaderValue) GetRawValue() []byte {
+ if x != nil {
+ return x.RawValue
+ }
+ return nil
+}
+
+// Header name/value pair plus option to control append behavior.
+type HeaderValueOption struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Header name/value pair that this option applies to.
+ Header *HeaderValue `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ // Should the value be appended? If true (default), the value is appended to
+ // existing values. Otherwise it replaces any existing values.
+ // This field is deprecated and please use
+ // :ref:`append_action ` as replacement.
+ //
+ // .. note::
+ //
+ // The :ref:`external authorization service ` and
+ // :ref:`external processor service ` have
+ // default value (``false``) for this field.
+ //
+ // Deprecated: Do not use.
+ Append *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=append,proto3" json:"append,omitempty"`
+ // Describes the action taken to append/overwrite the given value for an existing header
+ // or to only add this header if it's absent.
+ // Value defaults to :ref:`APPEND_IF_EXISTS_OR_ADD
+ // `.
+ AppendAction HeaderValueOption_HeaderAppendAction `protobuf:"varint,3,opt,name=append_action,json=appendAction,proto3,enum=envoy.config.core.v3.HeaderValueOption_HeaderAppendAction" json:"append_action,omitempty"`
+ // Is the header value allowed to be empty? If false (default), custom headers with empty values are dropped,
+ // otherwise they are added.
+ KeepEmptyValue bool `protobuf:"varint,4,opt,name=keep_empty_value,json=keepEmptyValue,proto3" json:"keep_empty_value,omitempty"`
+}
+
+func (x *HeaderValueOption) Reset() {
+ *x = HeaderValueOption{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HeaderValueOption) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HeaderValueOption) ProtoMessage() {}
+
+func (x *HeaderValueOption) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HeaderValueOption.ProtoReflect.Descriptor instead.
+func (*HeaderValueOption) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *HeaderValueOption) GetHeader() *HeaderValue {
+ if x != nil {
+ return x.Header
+ }
+ return nil
+}
+
+// Deprecated: Do not use.
+func (x *HeaderValueOption) GetAppend() *wrapperspb.BoolValue {
+ if x != nil {
+ return x.Append
+ }
+ return nil
+}
+
+func (x *HeaderValueOption) GetAppendAction() HeaderValueOption_HeaderAppendAction {
+ if x != nil {
+ return x.AppendAction
+ }
+ return HeaderValueOption_APPEND_IF_EXISTS_OR_ADD
+}
+
+func (x *HeaderValueOption) GetKeepEmptyValue() bool {
+ if x != nil {
+ return x.KeepEmptyValue
+ }
+ return false
+}
+
+// Wrapper for a set of headers.
+type HeaderMap struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Headers []*HeaderValue `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"`
+}
+
+func (x *HeaderMap) Reset() {
+ *x = HeaderMap{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HeaderMap) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HeaderMap) ProtoMessage() {}
+
+func (x *HeaderMap) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HeaderMap.ProtoReflect.Descriptor instead.
+func (*HeaderMap) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *HeaderMap) GetHeaders() []*HeaderValue {
+ if x != nil {
+ return x.Headers
+ }
+ return nil
+}
+
+// A directory that is watched for changes, e.g. by inotify on Linux. Move/rename
+// events inside this directory trigger the watch.
+type WatchedDirectory struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Directory path to watch.
+ Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+}
+
+func (x *WatchedDirectory) Reset() {
+ *x = WatchedDirectory{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *WatchedDirectory) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*WatchedDirectory) ProtoMessage() {}
+
+func (x *WatchedDirectory) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use WatchedDirectory.ProtoReflect.Descriptor instead.
+func (*WatchedDirectory) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{13}
+}
+
+func (x *WatchedDirectory) GetPath() string {
+ if x != nil {
+ return x.Path
+ }
+ return ""
+}
+
+// Data source consisting of a file, an inline value, or an environment variable.
+type DataSource struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Specifier:
+ //
+ // *DataSource_Filename
+ // *DataSource_InlineBytes
+ // *DataSource_InlineString
+ // *DataSource_EnvironmentVariable
+ Specifier isDataSource_Specifier `protobuf_oneof:"specifier"`
+}
+
+func (x *DataSource) Reset() {
+ *x = DataSource{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DataSource) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DataSource) ProtoMessage() {}
+
+func (x *DataSource) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DataSource.ProtoReflect.Descriptor instead.
+func (*DataSource) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{14}
+}
+
+func (m *DataSource) GetSpecifier() isDataSource_Specifier {
+ if m != nil {
+ return m.Specifier
+ }
+ return nil
+}
+
+func (x *DataSource) GetFilename() string {
+ if x, ok := x.GetSpecifier().(*DataSource_Filename); ok {
+ return x.Filename
+ }
+ return ""
+}
+
+func (x *DataSource) GetInlineBytes() []byte {
+ if x, ok := x.GetSpecifier().(*DataSource_InlineBytes); ok {
+ return x.InlineBytes
+ }
+ return nil
+}
+
+func (x *DataSource) GetInlineString() string {
+ if x, ok := x.GetSpecifier().(*DataSource_InlineString); ok {
+ return x.InlineString
+ }
+ return ""
+}
+
+func (x *DataSource) GetEnvironmentVariable() string {
+ if x, ok := x.GetSpecifier().(*DataSource_EnvironmentVariable); ok {
+ return x.EnvironmentVariable
+ }
+ return ""
+}
+
+type isDataSource_Specifier interface {
+ isDataSource_Specifier()
+}
+
+type DataSource_Filename struct {
+ // Local filesystem data source.
+ Filename string `protobuf:"bytes,1,opt,name=filename,proto3,oneof"`
+}
+
+type DataSource_InlineBytes struct {
+ // Bytes inlined in the configuration.
+ InlineBytes []byte `protobuf:"bytes,2,opt,name=inline_bytes,json=inlineBytes,proto3,oneof"`
+}
+
+type DataSource_InlineString struct {
+ // String inlined in the configuration.
+ InlineString string `protobuf:"bytes,3,opt,name=inline_string,json=inlineString,proto3,oneof"`
+}
+
+type DataSource_EnvironmentVariable struct {
+ // Environment variable data source.
+ EnvironmentVariable string `protobuf:"bytes,4,opt,name=environment_variable,json=environmentVariable,proto3,oneof"`
+}
+
+func (*DataSource_Filename) isDataSource_Specifier() {}
+
+func (*DataSource_InlineBytes) isDataSource_Specifier() {}
+
+func (*DataSource_InlineString) isDataSource_Specifier() {}
+
+func (*DataSource_EnvironmentVariable) isDataSource_Specifier() {}
+
+// The message specifies the retry policy of remote data source when fetching fails.
+type RetryPolicy struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Specifies parameters that control :ref:`retry backoff strategy `.
+ // This parameter is optional, in which case the default base interval is 1000 milliseconds. The
+ // default maximum interval is 10 times the base interval.
+ RetryBackOff *BackoffStrategy `protobuf:"bytes,1,opt,name=retry_back_off,json=retryBackOff,proto3" json:"retry_back_off,omitempty"`
+ // Specifies the allowed number of retries. This parameter is optional and
+ // defaults to 1.
+ NumRetries *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=num_retries,json=numRetries,proto3" json:"num_retries,omitempty"`
+}
+
+func (x *RetryPolicy) Reset() {
+ *x = RetryPolicy{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RetryPolicy) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RetryPolicy) ProtoMessage() {}
+
+func (x *RetryPolicy) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RetryPolicy.ProtoReflect.Descriptor instead.
+func (*RetryPolicy) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{15}
+}
+
+func (x *RetryPolicy) GetRetryBackOff() *BackoffStrategy {
+ if x != nil {
+ return x.RetryBackOff
+ }
+ return nil
+}
+
+func (x *RetryPolicy) GetNumRetries() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.NumRetries
+ }
+ return nil
+}
+
+// The message specifies how to fetch data from remote and how to verify it.
+type RemoteDataSource struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The HTTP URI to fetch the remote data.
+ HttpUri *HttpUri `protobuf:"bytes,1,opt,name=http_uri,json=httpUri,proto3" json:"http_uri,omitempty"`
+ // SHA256 string for verifying data.
+ Sha256 string `protobuf:"bytes,2,opt,name=sha256,proto3" json:"sha256,omitempty"`
+ // Retry policy for fetching remote data.
+ RetryPolicy *RetryPolicy `protobuf:"bytes,3,opt,name=retry_policy,json=retryPolicy,proto3" json:"retry_policy,omitempty"`
+}
+
+func (x *RemoteDataSource) Reset() {
+ *x = RemoteDataSource{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RemoteDataSource) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RemoteDataSource) ProtoMessage() {}
+
+func (x *RemoteDataSource) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[16]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RemoteDataSource.ProtoReflect.Descriptor instead.
+func (*RemoteDataSource) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{16}
+}
+
+func (x *RemoteDataSource) GetHttpUri() *HttpUri {
+ if x != nil {
+ return x.HttpUri
+ }
+ return nil
+}
+
+func (x *RemoteDataSource) GetSha256() string {
+ if x != nil {
+ return x.Sha256
+ }
+ return ""
+}
+
+func (x *RemoteDataSource) GetRetryPolicy() *RetryPolicy {
+ if x != nil {
+ return x.RetryPolicy
+ }
+ return nil
+}
+
+// Async data source which support async data fetch.
+type AsyncDataSource struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Specifier:
+ //
+ // *AsyncDataSource_Local
+ // *AsyncDataSource_Remote
+ Specifier isAsyncDataSource_Specifier `protobuf_oneof:"specifier"`
+}
+
+func (x *AsyncDataSource) Reset() {
+ *x = AsyncDataSource{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AsyncDataSource) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AsyncDataSource) ProtoMessage() {}
+
+func (x *AsyncDataSource) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[17]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AsyncDataSource.ProtoReflect.Descriptor instead.
+func (*AsyncDataSource) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{17}
+}
+
+func (m *AsyncDataSource) GetSpecifier() isAsyncDataSource_Specifier {
+ if m != nil {
+ return m.Specifier
+ }
+ return nil
+}
+
+func (x *AsyncDataSource) GetLocal() *DataSource {
+ if x, ok := x.GetSpecifier().(*AsyncDataSource_Local); ok {
+ return x.Local
+ }
+ return nil
+}
+
+func (x *AsyncDataSource) GetRemote() *RemoteDataSource {
+ if x, ok := x.GetSpecifier().(*AsyncDataSource_Remote); ok {
+ return x.Remote
+ }
+ return nil
+}
+
+type isAsyncDataSource_Specifier interface {
+ isAsyncDataSource_Specifier()
+}
+
+type AsyncDataSource_Local struct {
+ // Local async data source.
+ Local *DataSource `protobuf:"bytes,1,opt,name=local,proto3,oneof"`
+}
+
+type AsyncDataSource_Remote struct {
+ // Remote async data source.
+ Remote *RemoteDataSource `protobuf:"bytes,2,opt,name=remote,proto3,oneof"`
+}
+
+func (*AsyncDataSource_Local) isAsyncDataSource_Specifier() {}
+
+func (*AsyncDataSource_Remote) isAsyncDataSource_Specifier() {}
+
+// Configuration for transport socket in :ref:`listeners ` and
+// :ref:`clusters `. If the configuration is
+// empty, a default transport socket implementation and configuration will be
+// chosen based on the platform and existence of tls_context.
+type TransportSocket struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name of the transport socket to instantiate. The name must match a supported transport
+ // socket implementation.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Implementation specific configuration which depends on the implementation being instantiated.
+ // See the supported transport socket implementations for further documentation.
+ //
+ // Types that are assignable to ConfigType:
+ //
+ // *TransportSocket_TypedConfig
+ ConfigType isTransportSocket_ConfigType `protobuf_oneof:"config_type"`
+}
+
+func (x *TransportSocket) Reset() {
+ *x = TransportSocket{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TransportSocket) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TransportSocket) ProtoMessage() {}
+
+func (x *TransportSocket) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[18]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TransportSocket.ProtoReflect.Descriptor instead.
+func (*TransportSocket) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{18}
+}
+
+func (x *TransportSocket) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (m *TransportSocket) GetConfigType() isTransportSocket_ConfigType {
+ if m != nil {
+ return m.ConfigType
+ }
+ return nil
+}
+
+func (x *TransportSocket) GetTypedConfig() *anypb.Any {
+ if x, ok := x.GetConfigType().(*TransportSocket_TypedConfig); ok {
+ return x.TypedConfig
+ }
+ return nil
+}
+
+type isTransportSocket_ConfigType interface {
+ isTransportSocket_ConfigType()
+}
+
+type TransportSocket_TypedConfig struct {
+ TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"`
+}
+
+func (*TransportSocket_TypedConfig) isTransportSocket_ConfigType() {}
+
+// Runtime derived FractionalPercent with defaults for when the numerator or denominator is not
+// specified via a runtime key.
+//
+// .. note::
+//
+// Parsing of the runtime key's data is implemented such that it may be represented as a
+// :ref:`FractionalPercent ` proto represented as JSON/YAML
+// and may also be represented as an integer with the assumption that the value is an integral
+// percentage out of 100. For instance, a runtime key lookup returning the value "42" would parse
+// as a ``FractionalPercent`` whose numerator is 42 and denominator is HUNDRED.
+type RuntimeFractionalPercent struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Default value if the runtime value's for the numerator/denominator keys are not available.
+ DefaultValue *v3.FractionalPercent `protobuf:"bytes,1,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"`
+ // Runtime key for a YAML representation of a FractionalPercent.
+ RuntimeKey string `protobuf:"bytes,2,opt,name=runtime_key,json=runtimeKey,proto3" json:"runtime_key,omitempty"`
+}
+
+func (x *RuntimeFractionalPercent) Reset() {
+ *x = RuntimeFractionalPercent{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RuntimeFractionalPercent) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RuntimeFractionalPercent) ProtoMessage() {}
+
+func (x *RuntimeFractionalPercent) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[19]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RuntimeFractionalPercent.ProtoReflect.Descriptor instead.
+func (*RuntimeFractionalPercent) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{19}
+}
+
+func (x *RuntimeFractionalPercent) GetDefaultValue() *v3.FractionalPercent {
+ if x != nil {
+ return x.DefaultValue
+ }
+ return nil
+}
+
+func (x *RuntimeFractionalPercent) GetRuntimeKey() string {
+ if x != nil {
+ return x.RuntimeKey
+ }
+ return ""
+}
+
+// Identifies a specific ControlPlane instance that Envoy is connected to.
+type ControlPlane struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // An opaque control plane identifier that uniquely identifies an instance
+ // of control plane. This can be used to identify which control plane instance,
+ // the Envoy is connected to.
+ Identifier string `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"`
+}
+
+func (x *ControlPlane) Reset() {
+ *x = ControlPlane{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ControlPlane) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ControlPlane) ProtoMessage() {}
+
+func (x *ControlPlane) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_base_proto_msgTypes[20]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ControlPlane.ProtoReflect.Descriptor instead.
+func (*ControlPlane) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_base_proto_rawDescGZIP(), []int{20}
+}
+
+func (x *ControlPlane) GetIdentifier() string {
+ if x != nil {
+ return x.Identifier
+ }
+ return ""
+}
+
+var File_envoy_config_core_v3_base_proto protoreflect.FileDescriptor
+
+var file_envoy_config_core_v3_base_proto_rawDesc = []byte{
+ 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
+ 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64,
+ 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76,
+ 0x33, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f,
+ 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x75, 0x72, 0x69, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65,
+ 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33,
+ 0x2f, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x20, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f,
+ 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
+ 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64,
+ 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x22, 0x74, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x16,
+ 0x0a, 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06,
+ 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x7a, 0x6f, 0x6e, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x75,
+ 0x62, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75,
+ 0x62, 0x5a, 0x6f, 0x6e, 0x65, 0x3a, 0x21, 0x9a, 0xc5, 0x88, 0x1e, 0x1c, 0x0a, 0x1a, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e,
+ 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x22, 0xa4, 0x01, 0x0a, 0x0c, 0x42, 0x75, 0x69,
+ 0x6c, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x07, 0x76, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e,
+ 0x74, 0x69, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x08,
+ 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a,
+ 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22,
+ 0x8c, 0x02, 0x0a, 0x09, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x34, 0x0a,
+ 0x0f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03,
+ 0x33, 0x2e, 0x30, 0x52, 0x0e, 0x74, 0x79, 0x70, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x6f, 0x72, 0x12, 0x3c, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x75, 0x69, 0x6c,
+ 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1b, 0x0a,
+ 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09,
+ 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x73, 0x3a, 0x22, 0x9a, 0xc5, 0x88, 0x1e,
+ 0x1d, 0x0a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e,
+ 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xb2,
+ 0x06, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74,
+ 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
+ 0x72, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x08, 0x6d, 0x65,
+ 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x60, 0x0a, 0x12, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69,
+ 0x63, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x0c, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x2e, 0x44,
+ 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x50, 0x61,
+ 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x3a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61,
+ 0x6c, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76,
+ 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61,
+ 0x6c, 0x69, 0x74, 0x79, 0x12, 0x26, 0x0a, 0x0f, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x61, 0x67, 0x65,
+ 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x75,
+ 0x73, 0x65, 0x72, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x12,
+ 0x75, 0x73, 0x65, 0x72, 0x5f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x10, 0x75, 0x73, 0x65, 0x72,
+ 0x41, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x5d, 0x0a, 0x18,
+ 0x75, 0x73, 0x65, 0x72, 0x5f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x75, 0x69, 0x6c, 0x64,
+ 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x15, 0x75, 0x73, 0x65, 0x72, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x42,
+ 0x75, 0x69, 0x6c, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x0a, 0x65,
+ 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x1f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63,
+ 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f,
+ 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18,
+ 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x46, 0x65, 0x61,
+ 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x5b, 0x0a, 0x13, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x69,
+ 0x6e, 0x67, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x0b, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73,
+ 0x73, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x52, 0x12,
+ 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x69, 0x6e, 0x67, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73,
+ 0x65, 0x73, 0x1a, 0x60, 0x0a, 0x16, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x50, 0x61, 0x72,
+ 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
+ 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x30,
+ 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
+ 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x74,
+ 0x65, 0x78, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x1d, 0x9a, 0xc5, 0x88, 0x1e, 0x18, 0x0a, 0x16, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4e,
+ 0x6f, 0x64, 0x65, 0x42, 0x19, 0x0a, 0x17, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x61, 0x67, 0x65, 0x6e,
+ 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04,
+ 0x08, 0x05, 0x10, 0x06, 0x52, 0x0d, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x76, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x22, 0xb1, 0x03, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
+ 0x12, 0x5b, 0x0a, 0x0f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64,
+ 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33,
+ 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72,
+ 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x66,
+ 0x69, 0x6c, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x6b, 0x0a,
+ 0x15, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x65,
+ 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65,
+ 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x79, 0x70,
+ 0x65, 0x64, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x74, 0x79, 0x70, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x74,
+ 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x5a, 0x0a, 0x13, 0x46, 0x69,
+ 0x6c, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
+ 0x6b, 0x65, 0x79, 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x5c, 0x0a, 0x18, 0x54, 0x79, 0x70, 0x65, 0x64, 0x46,
+ 0x69, 0x6c, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74,
+ 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x21, 0x9a, 0xc5, 0x88, 0x1e, 0x1c, 0x0a, 0x1a, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4d,
+ 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x86, 0x01, 0x0a, 0x0d, 0x52, 0x75, 0x6e, 0x74,
+ 0x69, 0x6d, 0x65, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66,
+ 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d,
+ 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x28,
+ 0x0a, 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x72, 0x75,
+ 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a,
+ 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32,
+ 0x22, 0x77, 0x0a, 0x0e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x50, 0x65, 0x72, 0x63, 0x65,
+ 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e,
+ 0x74, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12,
+ 0x28, 0x0a, 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x72,
+ 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x22, 0x86, 0x01, 0x0a, 0x0d, 0x52, 0x75,
+ 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64,
+ 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x01, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x12, 0x28, 0x0a, 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a,
+ 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e,
+ 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e,
+ 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x44, 0x6f, 0x75, 0x62,
+ 0x6c, 0x65, 0x22, 0xb6, 0x01, 0x0a, 0x12, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x65,
+ 0x61, 0x74, 0x75, 0x72, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x12, 0x49, 0x0a, 0x0d, 0x64, 0x65, 0x66,
+ 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42,
+ 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x12, 0x28, 0x0a, 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f,
+ 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02,
+ 0x10, 0x01, 0x52, 0x0a, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x3a, 0x2b,
+ 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69,
+ 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65,
+ 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x22, 0x41, 0x0a, 0x0e, 0x51,
+ 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x19, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72,
+ 0x02, 0x10, 0x01, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xcd,
+ 0x01, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23,
+ 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x11, 0xfa, 0x42, 0x0e,
+ 0x72, 0x0c, 0x10, 0x01, 0x28, 0x80, 0x80, 0x01, 0xc0, 0x01, 0x01, 0xc8, 0x01, 0x00, 0x52, 0x03,
+ 0x6b, 0x65, 0x79, 0x12, 0x37, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x21, 0xfa, 0x42, 0x0c, 0x72, 0x0a, 0x28, 0x80, 0x80, 0x01, 0xc0, 0x01, 0x02,
+ 0xc8, 0x01, 0x00, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x0c, 0x12, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x5f, 0x74, 0x79, 0x70, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3a, 0x0a, 0x09,
+ 0x72, 0x61, 0x77, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x42,
+ 0x1d, 0xfa, 0x42, 0x08, 0x7a, 0x06, 0x10, 0x00, 0x18, 0x80, 0x80, 0x01, 0xf2, 0x98, 0xfe, 0x8f,
+ 0x05, 0x0c, 0x12, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x52, 0x08,
+ 0x72, 0x61, 0x77, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a,
+ 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xd9,
+ 0x03, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x43, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64,
+ 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10,
+ 0x01, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x06, 0x61, 0x70, 0x70,
+ 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33,
+ 0x2e, 0x30, 0x52, 0x06, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x12, 0x69, 0x0a, 0x0d, 0x61, 0x70,
+ 0x70, 0x65, 0x6e, 0x64, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0e, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65,
+ 0x72, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa,
+ 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x41,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x65, 0x6d,
+ 0x70, 0x74, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x0e, 0x6b, 0x65, 0x65, 0x70, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22,
+ 0x7d, 0x0a, 0x12, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x41,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x41, 0x50, 0x50, 0x45, 0x4e, 0x44, 0x5f,
+ 0x49, 0x46, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x5f, 0x4f, 0x52, 0x5f, 0x41, 0x44, 0x44,
+ 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x41, 0x44, 0x44, 0x5f, 0x49, 0x46, 0x5f, 0x41, 0x42, 0x53,
+ 0x45, 0x4e, 0x54, 0x10, 0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x4f, 0x56, 0x45, 0x52, 0x57, 0x52, 0x49,
+ 0x54, 0x45, 0x5f, 0x49, 0x46, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x5f, 0x4f, 0x52, 0x5f,
+ 0x41, 0x44, 0x44, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x4f, 0x56, 0x45, 0x52, 0x57, 0x52, 0x49,
+ 0x54, 0x45, 0x5f, 0x49, 0x46, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x10, 0x03, 0x3a, 0x2a,
+ 0x9a, 0xc5, 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69,
+ 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6c, 0x0a, 0x09, 0x48, 0x65,
+ 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x70, 0x12, 0x3b, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65,
+ 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e,
+ 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x68, 0x65, 0x61,
+ 0x64, 0x65, 0x72, 0x73, 0x3a, 0x22, 0x9a, 0xc5, 0x88, 0x1e, 0x1d, 0x0a, 0x1b, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48,
+ 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x70, 0x22, 0x2f, 0x0a, 0x10, 0x57, 0x61, 0x74, 0x63,
+ 0x68, 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x1b, 0x0a, 0x04,
+ 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72,
+ 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0xf4, 0x01, 0x0a, 0x0a, 0x44, 0x61,
+ 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72,
+ 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12,
+ 0x23, 0x0a, 0x0c, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0b, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x42,
+ 0x79, 0x74, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0d, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73,
+ 0x74, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x69,
+ 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x3c, 0x0a, 0x14, 0x65,
+ 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x72, 0x69, 0x61,
+ 0x62, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02,
+ 0x10, 0x01, 0x48, 0x00, 0x52, 0x13, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e,
+ 0x74, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x23, 0x9a, 0xc5, 0x88, 0x1e, 0x1e,
+ 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63,
+ 0x6f, 0x72, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x10,
+ 0x0a, 0x09, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01,
+ 0x22, 0xd4, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79,
+ 0x12, 0x4b, 0x0a, 0x0e, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x5f, 0x6f,
+ 0x66, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e,
+ 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52,
+ 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, 0x63, 0x6b, 0x4f, 0x66, 0x66, 0x12, 0x52, 0x0a,
+ 0x0b, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x42, 0x13, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x0d, 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65,
+ 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x0a, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65,
+ 0x73, 0x3a, 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x74, 0x72,
+ 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xe8, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x6d, 0x6f,
+ 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x42, 0x0a, 0x08,
+ 0x68, 0x74, 0x74, 0x70, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x55, 0x72, 0x69, 0x42, 0x08, 0xfa,
+ 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x68, 0x74, 0x74, 0x70, 0x55, 0x72, 0x69,
+ 0x12, 0x1f, 0x0a, 0x06, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x73, 0x68, 0x61, 0x32, 0x35,
+ 0x36, 0x12, 0x44, 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63,
+ 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52,
+ 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, 0x72, 0x65, 0x74, 0x72,
+ 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72,
+ 0x65, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x22, 0xc9, 0x01, 0x0a, 0x0f, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x44, 0x61, 0x74, 0x61,
+ 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x38, 0x0a, 0x05, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74,
+ 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, 0x05, 0x6c, 0x6f, 0x63, 0x61, 0x6c,
+ 0x12, 0x40, 0x0a, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
+ 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x44, 0x61,
+ 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, 0x06, 0x72, 0x65, 0x6d, 0x6f,
+ 0x74, 0x65, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x73, 0x79,
+ 0x6e, 0x63, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x10, 0x0a, 0x09,
+ 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xb0,
+ 0x01, 0x0a, 0x0f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b,
+ 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
+ 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74,
+ 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e,
+ 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e,
+ 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6f,
+ 0x63, 0x6b, 0x65, 0x74, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74,
+ 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x22, 0xbf, 0x01, 0x0a, 0x18, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x72, 0x61,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x4f,
+ 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79,
+ 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c,
+ 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10,
+ 0x01, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12,
+ 0x1f, 0x0a, 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79,
+ 0x3a, 0x31, 0x9a, 0xc5, 0x88, 0x1e, 0x2c, 0x0a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61,
+ 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69,
+ 0x6d, 0x65, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63,
+ 0x65, 0x6e, 0x74, 0x22, 0x55, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x50, 0x6c,
+ 0x61, 0x6e, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65,
+ 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66,
+ 0x69, 0x65, 0x72, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x50, 0x6c, 0x61, 0x6e, 0x65, 0x2a, 0x28, 0x0a, 0x0f, 0x52, 0x6f,
+ 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x0b, 0x0a,
+ 0x07, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x49,
+ 0x47, 0x48, 0x10, 0x01, 0x2a, 0x89, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x16, 0x0a, 0x12, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44,
+ 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07,
+ 0x0a, 0x03, 0x47, 0x45, 0x54, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x45, 0x41, 0x44, 0x10,
+ 0x02, 0x12, 0x08, 0x0a, 0x04, 0x50, 0x4f, 0x53, 0x54, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x50,
+ 0x55, 0x54, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x05,
+ 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x10, 0x06, 0x12, 0x0b, 0x0a,
+ 0x07, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x10, 0x07, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x52,
+ 0x41, 0x43, 0x45, 0x10, 0x08, 0x12, 0x09, 0x0a, 0x05, 0x50, 0x41, 0x54, 0x43, 0x48, 0x10, 0x09,
+ 0x2a, 0x3e, 0x0a, 0x10, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x44, 0x69, 0x72, 0x65, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46,
+ 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x42, 0x4f, 0x55, 0x4e, 0x44,
+ 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x55, 0x54, 0x42, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x02,
+ 0x42, 0x7d, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78,
+ 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63,
+ 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x09, 0x42, 0x61, 0x73, 0x65, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33,
+ 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62,
+ 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_config_core_v3_base_proto_rawDescOnce sync.Once
+ file_envoy_config_core_v3_base_proto_rawDescData = file_envoy_config_core_v3_base_proto_rawDesc
+)
+
+func file_envoy_config_core_v3_base_proto_rawDescGZIP() []byte {
+ file_envoy_config_core_v3_base_proto_rawDescOnce.Do(func() {
+ file_envoy_config_core_v3_base_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_base_proto_rawDescData)
+ })
+ return file_envoy_config_core_v3_base_proto_rawDescData
+}
+
+var file_envoy_config_core_v3_base_proto_enumTypes = make([]protoimpl.EnumInfo, 4)
+var file_envoy_config_core_v3_base_proto_msgTypes = make([]protoimpl.MessageInfo, 24)
+var file_envoy_config_core_v3_base_proto_goTypes = []interface{}{
+ (RoutingPriority)(0), // 0: envoy.config.core.v3.RoutingPriority
+ (RequestMethod)(0), // 1: envoy.config.core.v3.RequestMethod
+ (TrafficDirection)(0), // 2: envoy.config.core.v3.TrafficDirection
+ (HeaderValueOption_HeaderAppendAction)(0), // 3: envoy.config.core.v3.HeaderValueOption.HeaderAppendAction
+ (*Locality)(nil), // 4: envoy.config.core.v3.Locality
+ (*BuildVersion)(nil), // 5: envoy.config.core.v3.BuildVersion
+ (*Extension)(nil), // 6: envoy.config.core.v3.Extension
+ (*Node)(nil), // 7: envoy.config.core.v3.Node
+ (*Metadata)(nil), // 8: envoy.config.core.v3.Metadata
+ (*RuntimeUInt32)(nil), // 9: envoy.config.core.v3.RuntimeUInt32
+ (*RuntimePercent)(nil), // 10: envoy.config.core.v3.RuntimePercent
+ (*RuntimeDouble)(nil), // 11: envoy.config.core.v3.RuntimeDouble
+ (*RuntimeFeatureFlag)(nil), // 12: envoy.config.core.v3.RuntimeFeatureFlag
+ (*QueryParameter)(nil), // 13: envoy.config.core.v3.QueryParameter
+ (*HeaderValue)(nil), // 14: envoy.config.core.v3.HeaderValue
+ (*HeaderValueOption)(nil), // 15: envoy.config.core.v3.HeaderValueOption
+ (*HeaderMap)(nil), // 16: envoy.config.core.v3.HeaderMap
+ (*WatchedDirectory)(nil), // 17: envoy.config.core.v3.WatchedDirectory
+ (*DataSource)(nil), // 18: envoy.config.core.v3.DataSource
+ (*RetryPolicy)(nil), // 19: envoy.config.core.v3.RetryPolicy
+ (*RemoteDataSource)(nil), // 20: envoy.config.core.v3.RemoteDataSource
+ (*AsyncDataSource)(nil), // 21: envoy.config.core.v3.AsyncDataSource
+ (*TransportSocket)(nil), // 22: envoy.config.core.v3.TransportSocket
+ (*RuntimeFractionalPercent)(nil), // 23: envoy.config.core.v3.RuntimeFractionalPercent
+ (*ControlPlane)(nil), // 24: envoy.config.core.v3.ControlPlane
+ nil, // 25: envoy.config.core.v3.Node.DynamicParametersEntry
+ nil, // 26: envoy.config.core.v3.Metadata.FilterMetadataEntry
+ nil, // 27: envoy.config.core.v3.Metadata.TypedFilterMetadataEntry
+ (*v3.SemanticVersion)(nil), // 28: envoy.type.v3.SemanticVersion
+ (*structpb.Struct)(nil), // 29: google.protobuf.Struct
+ (*Address)(nil), // 30: envoy.config.core.v3.Address
+ (*v3.Percent)(nil), // 31: envoy.type.v3.Percent
+ (*wrapperspb.BoolValue)(nil), // 32: google.protobuf.BoolValue
+ (*BackoffStrategy)(nil), // 33: envoy.config.core.v3.BackoffStrategy
+ (*wrapperspb.UInt32Value)(nil), // 34: google.protobuf.UInt32Value
+ (*HttpUri)(nil), // 35: envoy.config.core.v3.HttpUri
+ (*anypb.Any)(nil), // 36: google.protobuf.Any
+ (*v3.FractionalPercent)(nil), // 37: envoy.type.v3.FractionalPercent
+ (*v31.ContextParams)(nil), // 38: xds.core.v3.ContextParams
+}
+var file_envoy_config_core_v3_base_proto_depIdxs = []int32{
+ 28, // 0: envoy.config.core.v3.BuildVersion.version:type_name -> envoy.type.v3.SemanticVersion
+ 29, // 1: envoy.config.core.v3.BuildVersion.metadata:type_name -> google.protobuf.Struct
+ 5, // 2: envoy.config.core.v3.Extension.version:type_name -> envoy.config.core.v3.BuildVersion
+ 29, // 3: envoy.config.core.v3.Node.metadata:type_name -> google.protobuf.Struct
+ 25, // 4: envoy.config.core.v3.Node.dynamic_parameters:type_name -> envoy.config.core.v3.Node.DynamicParametersEntry
+ 4, // 5: envoy.config.core.v3.Node.locality:type_name -> envoy.config.core.v3.Locality
+ 5, // 6: envoy.config.core.v3.Node.user_agent_build_version:type_name -> envoy.config.core.v3.BuildVersion
+ 6, // 7: envoy.config.core.v3.Node.extensions:type_name -> envoy.config.core.v3.Extension
+ 30, // 8: envoy.config.core.v3.Node.listening_addresses:type_name -> envoy.config.core.v3.Address
+ 26, // 9: envoy.config.core.v3.Metadata.filter_metadata:type_name -> envoy.config.core.v3.Metadata.FilterMetadataEntry
+ 27, // 10: envoy.config.core.v3.Metadata.typed_filter_metadata:type_name -> envoy.config.core.v3.Metadata.TypedFilterMetadataEntry
+ 31, // 11: envoy.config.core.v3.RuntimePercent.default_value:type_name -> envoy.type.v3.Percent
+ 32, // 12: envoy.config.core.v3.RuntimeFeatureFlag.default_value:type_name -> google.protobuf.BoolValue
+ 14, // 13: envoy.config.core.v3.HeaderValueOption.header:type_name -> envoy.config.core.v3.HeaderValue
+ 32, // 14: envoy.config.core.v3.HeaderValueOption.append:type_name -> google.protobuf.BoolValue
+ 3, // 15: envoy.config.core.v3.HeaderValueOption.append_action:type_name -> envoy.config.core.v3.HeaderValueOption.HeaderAppendAction
+ 14, // 16: envoy.config.core.v3.HeaderMap.headers:type_name -> envoy.config.core.v3.HeaderValue
+ 33, // 17: envoy.config.core.v3.RetryPolicy.retry_back_off:type_name -> envoy.config.core.v3.BackoffStrategy
+ 34, // 18: envoy.config.core.v3.RetryPolicy.num_retries:type_name -> google.protobuf.UInt32Value
+ 35, // 19: envoy.config.core.v3.RemoteDataSource.http_uri:type_name -> envoy.config.core.v3.HttpUri
+ 19, // 20: envoy.config.core.v3.RemoteDataSource.retry_policy:type_name -> envoy.config.core.v3.RetryPolicy
+ 18, // 21: envoy.config.core.v3.AsyncDataSource.local:type_name -> envoy.config.core.v3.DataSource
+ 20, // 22: envoy.config.core.v3.AsyncDataSource.remote:type_name -> envoy.config.core.v3.RemoteDataSource
+ 36, // 23: envoy.config.core.v3.TransportSocket.typed_config:type_name -> google.protobuf.Any
+ 37, // 24: envoy.config.core.v3.RuntimeFractionalPercent.default_value:type_name -> envoy.type.v3.FractionalPercent
+ 38, // 25: envoy.config.core.v3.Node.DynamicParametersEntry.value:type_name -> xds.core.v3.ContextParams
+ 29, // 26: envoy.config.core.v3.Metadata.FilterMetadataEntry.value:type_name -> google.protobuf.Struct
+ 36, // 27: envoy.config.core.v3.Metadata.TypedFilterMetadataEntry.value:type_name -> google.protobuf.Any
+ 28, // [28:28] is the sub-list for method output_type
+ 28, // [28:28] is the sub-list for method input_type
+ 28, // [28:28] is the sub-list for extension type_name
+ 28, // [28:28] is the sub-list for extension extendee
+ 0, // [0:28] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_core_v3_base_proto_init() }
+func file_envoy_config_core_v3_base_proto_init() {
+ if File_envoy_config_core_v3_base_proto != nil {
+ return
+ }
+ file_envoy_config_core_v3_address_proto_init()
+ file_envoy_config_core_v3_backoff_proto_init()
+ file_envoy_config_core_v3_http_uri_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_core_v3_base_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Locality); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BuildVersion); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Extension); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Node); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Metadata); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RuntimeUInt32); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RuntimePercent); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RuntimeDouble); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RuntimeFeatureFlag); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*QueryParameter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HeaderValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HeaderValueOption); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HeaderMap); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*WatchedDirectory); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DataSource); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RetryPolicy); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RemoteDataSource); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AsyncDataSource); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TransportSocket); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RuntimeFractionalPercent); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ControlPlane); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[3].OneofWrappers = []interface{}{
+ (*Node_UserAgentVersion)(nil),
+ (*Node_UserAgentBuildVersion)(nil),
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[14].OneofWrappers = []interface{}{
+ (*DataSource_Filename)(nil),
+ (*DataSource_InlineBytes)(nil),
+ (*DataSource_InlineString)(nil),
+ (*DataSource_EnvironmentVariable)(nil),
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[17].OneofWrappers = []interface{}{
+ (*AsyncDataSource_Local)(nil),
+ (*AsyncDataSource_Remote)(nil),
+ }
+ file_envoy_config_core_v3_base_proto_msgTypes[18].OneofWrappers = []interface{}{
+ (*TransportSocket_TypedConfig)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_core_v3_base_proto_rawDesc,
+ NumEnums: 4,
+ NumMessages: 24,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_core_v3_base_proto_goTypes,
+ DependencyIndexes: file_envoy_config_core_v3_base_proto_depIdxs,
+ EnumInfos: file_envoy_config_core_v3_base_proto_enumTypes,
+ MessageInfos: file_envoy_config_core_v3_base_proto_msgTypes,
+ }.Build()
+ File_envoy_config_core_v3_base_proto = out.File
+ file_envoy_config_core_v3_base_proto_rawDesc = nil
+ file_envoy_config_core_v3_base_proto_goTypes = nil
+ file_envoy_config_core_v3_base_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/base.pb.validate.go b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/base.pb.validate.go
new file mode 100644
index 000000000..7e2dd5390
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/base.pb.validate.go
@@ -0,0 +1,3316 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/core/v3/base.proto
+
+package corev3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on Locality with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *Locality) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Locality with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in LocalityMultiError, or nil
+// if none found.
+func (m *Locality) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Locality) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Region
+
+ // no validation rules for Zone
+
+ // no validation rules for SubZone
+
+ if len(errors) > 0 {
+ return LocalityMultiError(errors)
+ }
+
+ return nil
+}
+
+// LocalityMultiError is an error wrapping multiple validation errors returned
+// by Locality.ValidateAll() if the designated constraints aren't met.
+type LocalityMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m LocalityMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m LocalityMultiError) AllErrors() []error { return m }
+
+// LocalityValidationError is the validation error returned by
+// Locality.Validate if the designated constraints aren't met.
+type LocalityValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e LocalityValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e LocalityValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e LocalityValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e LocalityValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e LocalityValidationError) ErrorName() string { return "LocalityValidationError" }
+
+// Error satisfies the builtin error interface
+func (e LocalityValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sLocality.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = LocalityValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = LocalityValidationError{}
+
+// Validate checks the field values on BuildVersion with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *BuildVersion) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on BuildVersion with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in BuildVersionMultiError, or
+// nil if none found.
+func (m *BuildVersion) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *BuildVersion) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetVersion()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BuildVersionValidationError{
+ field: "Version",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BuildVersionValidationError{
+ field: "Version",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetVersion()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BuildVersionValidationError{
+ field: "Version",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetMetadata()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BuildVersionValidationError{
+ field: "Metadata",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BuildVersionValidationError{
+ field: "Metadata",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BuildVersionValidationError{
+ field: "Metadata",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return BuildVersionMultiError(errors)
+ }
+
+ return nil
+}
+
// BuildVersionMultiError aggregates the validation errors produced by
// BuildVersion.ValidateAll() when one or more constraints are violated.
type BuildVersionMultiError []error

// Error joins the messages of all wrapped errors with "; ".
func (m BuildVersionMultiError) Error() string {
	msgs := make([]string, 0, len(m))
	for _, err := range m {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "; ")
}

// AllErrors returns the individual validation violations.
func (m BuildVersionMultiError) AllErrors() []error { return m }

// BuildVersionValidationError describes a single violated constraint on a
// BuildVersion message: the offending field, the reason, an optional
// underlying cause, and whether the value was a map key.
type BuildVersionValidationError struct {
	field  string
	reason string
	cause  error
	key    bool
}

// Field returns the name of the field that failed validation.
func (e BuildVersionValidationError) Field() string { return e.field }

// Reason returns the human-readable reason for the failure.
func (e BuildVersionValidationError) Reason() string { return e.reason }

// Cause returns the underlying error, if any.
func (e BuildVersionValidationError) Cause() error { return e.cause }

// Key reports whether the error applies to a map key.
func (e BuildVersionValidationError) Key() bool { return e.key }

// ErrorName returns the canonical name of this error type.
func (e BuildVersionValidationError) ErrorName() string { return "BuildVersionValidationError" }

// Error satisfies the builtin error interface.
func (e BuildVersionValidationError) Error() string {
	prefix := ""
	if e.key {
		prefix = "key for "
	}
	suffix := ""
	if e.cause != nil {
		suffix = fmt.Sprintf(" | caused by: %v", e.cause)
	}
	return fmt.Sprintf("invalid %sBuildVersion.%s: %s%s", prefix, e.field, e.reason, suffix)
}

var _ error = BuildVersionValidationError{}

var _ interface {
	Field() string
	Reason() string
	Key() bool
	Cause() error
	ErrorName() string
} = BuildVersionValidationError{}
+
// Validate checks the field values on Extension with the rules defined in the
// proto definition for this message. If any rules are violated, the first
// error encountered is returned, or nil if there are no violations.
func (m *Extension) Validate() error {
	return m.validate(false)
}

// ValidateAll checks the field values on Extension with the rules defined in
// the proto definition for this message. If any rules are violated, the
// result is a list of violation errors wrapped in ExtensionMultiError, or nil
// if none found.
func (m *Extension) ValidateAll() error {
	return m.validate(true)
}

// validate implements both Validate (all=false: return on the first
// violation) and ValidateAll (all=true: accumulate every violation).
// NOTE(review): generated by protoc-gen-validate — do not hand-edit.
func (m *Extension) validate(all bool) error {
	// A nil message is vacuously valid.
	if m == nil {
		return nil
	}

	var errors []error

	// no validation rules for Name

	// no validation rules for Category

	// no validation rules for TypeDescriptor

	// Version has no rules of its own; if the embedded message supports
	// validation it is validated recursively.
	if all {
		switch v := interface{}(m.GetVersion()).(type) {
		case interface{ ValidateAll() error }:
			if err := v.ValidateAll(); err != nil {
				errors = append(errors, ExtensionValidationError{
					field:  "Version",
					reason: "embedded message failed validation",
					cause:  err,
				})
			}
		case interface{ Validate() error }:
			if err := v.Validate(); err != nil {
				errors = append(errors, ExtensionValidationError{
					field:  "Version",
					reason: "embedded message failed validation",
					cause:  err,
				})
			}
		}
	} else if v, ok := interface{}(m.GetVersion()).(interface{ Validate() error }); ok {
		if err := v.Validate(); err != nil {
			return ExtensionValidationError{
				field:  "Version",
				reason: "embedded message failed validation",
				cause:  err,
			}
		}
	}

	// no validation rules for Disabled

	if len(errors) > 0 {
		return ExtensionMultiError(errors)
	}

	return nil
}
+
// ExtensionMultiError aggregates the validation errors produced by
// Extension.ValidateAll() when one or more constraints are violated.
type ExtensionMultiError []error

// Error joins the messages of all wrapped errors with "; ".
func (m ExtensionMultiError) Error() string {
	msgs := make([]string, 0, len(m))
	for _, err := range m {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "; ")
}

// AllErrors returns the individual validation violations.
func (m ExtensionMultiError) AllErrors() []error { return m }

// ExtensionValidationError describes a single violated constraint on an
// Extension message: the offending field, the reason, an optional underlying
// cause, and whether the value was a map key.
type ExtensionValidationError struct {
	field  string
	reason string
	cause  error
	key    bool
}

// Field returns the name of the field that failed validation.
func (e ExtensionValidationError) Field() string { return e.field }

// Reason returns the human-readable reason for the failure.
func (e ExtensionValidationError) Reason() string { return e.reason }

// Cause returns the underlying error, if any.
func (e ExtensionValidationError) Cause() error { return e.cause }

// Key reports whether the error applies to a map key.
func (e ExtensionValidationError) Key() bool { return e.key }

// ErrorName returns the canonical name of this error type.
func (e ExtensionValidationError) ErrorName() string { return "ExtensionValidationError" }

// Error satisfies the builtin error interface.
func (e ExtensionValidationError) Error() string {
	prefix := ""
	if e.key {
		prefix = "key for "
	}
	suffix := ""
	if e.cause != nil {
		suffix = fmt.Sprintf(" | caused by: %v", e.cause)
	}
	return fmt.Sprintf("invalid %sExtension.%s: %s%s", prefix, e.field, e.reason, suffix)
}

var _ error = ExtensionValidationError{}

var _ interface {
	Field() string
	Reason() string
	Key() bool
	Cause() error
	ErrorName() string
} = ExtensionValidationError{}
+
// Validate checks the field values on Node with the rules defined in the proto
// definition for this message. If any rules are violated, the first error
// encountered is returned, or nil if there are no violations.
func (m *Node) Validate() error {
	return m.validate(false)
}

// ValidateAll checks the field values on Node with the rules defined in the
// proto definition for this message. If any rules are violated, the result is
// a list of violation errors wrapped in NodeMultiError, or nil if none found.
func (m *Node) ValidateAll() error {
	return m.validate(true)
}

// validate implements both Validate (all=false: return on the first
// violation) and ValidateAll (all=true: accumulate every violation).
// NOTE(review): generated by protoc-gen-validate — do not hand-edit.
func (m *Node) validate(all bool) error {
	// A nil message is vacuously valid.
	if m == nil {
		return nil
	}

	var errors []error

	// no validation rules for Id

	// no validation rules for Cluster

	// Metadata has no rules of its own; validated recursively if supported.
	if all {
		switch v := interface{}(m.GetMetadata()).(type) {
		case interface{ ValidateAll() error }:
			if err := v.ValidateAll(); err != nil {
				errors = append(errors, NodeValidationError{
					field:  "Metadata",
					reason: "embedded message failed validation",
					cause:  err,
				})
			}
		case interface{ Validate() error }:
			if err := v.Validate(); err != nil {
				errors = append(errors, NodeValidationError{
					field:  "Metadata",
					reason: "embedded message failed validation",
					cause:  err,
				})
			}
		}
	} else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok {
		if err := v.Validate(); err != nil {
			return NodeValidationError{
				field:  "Metadata",
				reason: "embedded message failed validation",
				cause:  err,
			}
		}
	}

	{
		// Map keys are sorted so validation (and any resulting error) is
		// deterministic despite Go's randomized map iteration order.
		sorted_keys := make([]string, len(m.GetDynamicParameters()))
		i := 0
		for key := range m.GetDynamicParameters() {
			sorted_keys[i] = key
			i++
		}
		sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] })
		for _, key := range sorted_keys {
			val := m.GetDynamicParameters()[key]
			_ = val

			// no validation rules for DynamicParameters[key]

			if all {
				switch v := interface{}(val).(type) {
				case interface{ ValidateAll() error }:
					if err := v.ValidateAll(); err != nil {
						errors = append(errors, NodeValidationError{
							field:  fmt.Sprintf("DynamicParameters[%v]", key),
							reason: "embedded message failed validation",
							cause:  err,
						})
					}
				case interface{ Validate() error }:
					if err := v.Validate(); err != nil {
						errors = append(errors, NodeValidationError{
							field:  fmt.Sprintf("DynamicParameters[%v]", key),
							reason: "embedded message failed validation",
							cause:  err,
						})
					}
				}
			} else if v, ok := interface{}(val).(interface{ Validate() error }); ok {
				if err := v.Validate(); err != nil {
					return NodeValidationError{
						field:  fmt.Sprintf("DynamicParameters[%v]", key),
						reason: "embedded message failed validation",
						cause:  err,
					}
				}
			}

		}
	}

	// Locality has no rules of its own; validated recursively if supported.
	if all {
		switch v := interface{}(m.GetLocality()).(type) {
		case interface{ ValidateAll() error }:
			if err := v.ValidateAll(); err != nil {
				errors = append(errors, NodeValidationError{
					field:  "Locality",
					reason: "embedded message failed validation",
					cause:  err,
				})
			}
		case interface{ Validate() error }:
			if err := v.Validate(); err != nil {
				errors = append(errors, NodeValidationError{
					field:  "Locality",
					reason: "embedded message failed validation",
					cause:  err,
				})
			}
		}
	} else if v, ok := interface{}(m.GetLocality()).(interface{ Validate() error }); ok {
		if err := v.Validate(); err != nil {
			return NodeValidationError{
				field:  "Locality",
				reason: "embedded message failed validation",
				cause:  err,
			}
		}
	}

	// no validation rules for UserAgentName

	// Each repeated Extensions element is validated recursively if supported.
	for idx, item := range m.GetExtensions() {
		_, _ = idx, item

		if all {
			switch v := interface{}(item).(type) {
			case interface{ ValidateAll() error }:
				if err := v.ValidateAll(); err != nil {
					errors = append(errors, NodeValidationError{
						field:  fmt.Sprintf("Extensions[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			case interface{ Validate() error }:
				if err := v.Validate(); err != nil {
					errors = append(errors, NodeValidationError{
						field:  fmt.Sprintf("Extensions[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			}
		} else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
			if err := v.Validate(); err != nil {
				return NodeValidationError{
					field:  fmt.Sprintf("Extensions[%v]", idx),
					reason: "embedded message failed validation",
					cause:  err,
				}
			}
		}

	}

	// Each repeated ListeningAddresses element is validated recursively if
	// supported.
	for idx, item := range m.GetListeningAddresses() {
		_, _ = idx, item

		if all {
			switch v := interface{}(item).(type) {
			case interface{ ValidateAll() error }:
				if err := v.ValidateAll(); err != nil {
					errors = append(errors, NodeValidationError{
						field:  fmt.Sprintf("ListeningAddresses[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			case interface{ Validate() error }:
				if err := v.Validate(); err != nil {
					errors = append(errors, NodeValidationError{
						field:  fmt.Sprintf("ListeningAddresses[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			}
		} else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
			if err := v.Validate(); err != nil {
				return NodeValidationError{
					field:  fmt.Sprintf("ListeningAddresses[%v]", idx),
					reason: "embedded message failed validation",
					cause:  err,
				}
			}
		}

	}

	// The UserAgentVersionType oneof: a typed-nil wrapper is rejected, and
	// the build-version variant is validated recursively.
	switch v := m.UserAgentVersionType.(type) {
	case *Node_UserAgentVersion:
		if v == nil {
			err := NodeValidationError{
				field:  "UserAgentVersionType",
				reason: "oneof value cannot be a typed-nil",
			}
			if !all {
				return err
			}
			errors = append(errors, err)
		}
		// no validation rules for UserAgentVersion
	case *Node_UserAgentBuildVersion:
		if v == nil {
			err := NodeValidationError{
				field:  "UserAgentVersionType",
				reason: "oneof value cannot be a typed-nil",
			}
			if !all {
				return err
			}
			errors = append(errors, err)
		}

		if all {
			switch v := interface{}(m.GetUserAgentBuildVersion()).(type) {
			case interface{ ValidateAll() error }:
				if err := v.ValidateAll(); err != nil {
					errors = append(errors, NodeValidationError{
						field:  "UserAgentBuildVersion",
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			case interface{ Validate() error }:
				if err := v.Validate(); err != nil {
					errors = append(errors, NodeValidationError{
						field:  "UserAgentBuildVersion",
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			}
		} else if v, ok := interface{}(m.GetUserAgentBuildVersion()).(interface{ Validate() error }); ok {
			if err := v.Validate(); err != nil {
				return NodeValidationError{
					field:  "UserAgentBuildVersion",
					reason: "embedded message failed validation",
					cause:  err,
				}
			}
		}

	default:
		_ = v // ensures v is used
	}

	if len(errors) > 0 {
		return NodeMultiError(errors)
	}

	return nil
}
+
// NodeMultiError aggregates the validation errors produced by
// Node.ValidateAll() when one or more constraints are violated.
type NodeMultiError []error

// Error joins the messages of all wrapped errors with "; ".
func (m NodeMultiError) Error() string {
	msgs := make([]string, 0, len(m))
	for _, err := range m {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "; ")
}

// AllErrors returns the individual validation violations.
func (m NodeMultiError) AllErrors() []error { return m }

// NodeValidationError describes a single violated constraint on a Node
// message: the offending field, the reason, an optional underlying cause,
// and whether the value was a map key.
type NodeValidationError struct {
	field  string
	reason string
	cause  error
	key    bool
}

// Field returns the name of the field that failed validation.
func (e NodeValidationError) Field() string { return e.field }

// Reason returns the human-readable reason for the failure.
func (e NodeValidationError) Reason() string { return e.reason }

// Cause returns the underlying error, if any.
func (e NodeValidationError) Cause() error { return e.cause }

// Key reports whether the error applies to a map key.
func (e NodeValidationError) Key() bool { return e.key }

// ErrorName returns the canonical name of this error type.
func (e NodeValidationError) ErrorName() string { return "NodeValidationError" }

// Error satisfies the builtin error interface.
func (e NodeValidationError) Error() string {
	prefix := ""
	if e.key {
		prefix = "key for "
	}
	suffix := ""
	if e.cause != nil {
		suffix = fmt.Sprintf(" | caused by: %v", e.cause)
	}
	return fmt.Sprintf("invalid %sNode.%s: %s%s", prefix, e.field, e.reason, suffix)
}

var _ error = NodeValidationError{}

var _ interface {
	Field() string
	Reason() string
	Key() bool
	Cause() error
	ErrorName() string
} = NodeValidationError{}
+
// Validate checks the field values on Metadata with the rules defined in the
// proto definition for this message. If any rules are violated, the first
// error encountered is returned, or nil if there are no violations.
func (m *Metadata) Validate() error {
	return m.validate(false)
}

// ValidateAll checks the field values on Metadata with the rules defined in
// the proto definition for this message. If any rules are violated, the
// result is a list of violation errors wrapped in MetadataMultiError, or nil
// if none found.
func (m *Metadata) ValidateAll() error {
	return m.validate(true)
}

// validate implements both Validate (all=false: return on the first
// violation) and ValidateAll (all=true: accumulate every violation).
// NOTE(review): generated by protoc-gen-validate — do not hand-edit.
func (m *Metadata) validate(all bool) error {
	// A nil message is vacuously valid.
	if m == nil {
		return nil
	}

	var errors []error

	{
		// Map keys are sorted so validation order (and any resulting error)
		// is deterministic despite Go's randomized map iteration.
		sorted_keys := make([]string, len(m.GetFilterMetadata()))
		i := 0
		for key := range m.GetFilterMetadata() {
			sorted_keys[i] = key
			i++
		}
		sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] })
		for _, key := range sorted_keys {
			val := m.GetFilterMetadata()[key]
			_ = val

			// no validation rules for FilterMetadata[key]

			if all {
				switch v := interface{}(val).(type) {
				case interface{ ValidateAll() error }:
					if err := v.ValidateAll(); err != nil {
						errors = append(errors, MetadataValidationError{
							field:  fmt.Sprintf("FilterMetadata[%v]", key),
							reason: "embedded message failed validation",
							cause:  err,
						})
					}
				case interface{ Validate() error }:
					if err := v.Validate(); err != nil {
						errors = append(errors, MetadataValidationError{
							field:  fmt.Sprintf("FilterMetadata[%v]", key),
							reason: "embedded message failed validation",
							cause:  err,
						})
					}
				}
			} else if v, ok := interface{}(val).(interface{ Validate() error }); ok {
				if err := v.Validate(); err != nil {
					return MetadataValidationError{
						field:  fmt.Sprintf("FilterMetadata[%v]", key),
						reason: "embedded message failed validation",
						cause:  err,
					}
				}
			}

		}
	}

	{
		// Same deterministic-order treatment for the typed variant.
		sorted_keys := make([]string, len(m.GetTypedFilterMetadata()))
		i := 0
		for key := range m.GetTypedFilterMetadata() {
			sorted_keys[i] = key
			i++
		}
		sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] })
		for _, key := range sorted_keys {
			val := m.GetTypedFilterMetadata()[key]
			_ = val

			// no validation rules for TypedFilterMetadata[key]

			if all {
				switch v := interface{}(val).(type) {
				case interface{ ValidateAll() error }:
					if err := v.ValidateAll(); err != nil {
						errors = append(errors, MetadataValidationError{
							field:  fmt.Sprintf("TypedFilterMetadata[%v]", key),
							reason: "embedded message failed validation",
							cause:  err,
						})
					}
				case interface{ Validate() error }:
					if err := v.Validate(); err != nil {
						errors = append(errors, MetadataValidationError{
							field:  fmt.Sprintf("TypedFilterMetadata[%v]", key),
							reason: "embedded message failed validation",
							cause:  err,
						})
					}
				}
			} else if v, ok := interface{}(val).(interface{ Validate() error }); ok {
				if err := v.Validate(); err != nil {
					return MetadataValidationError{
						field:  fmt.Sprintf("TypedFilterMetadata[%v]", key),
						reason: "embedded message failed validation",
						cause:  err,
					}
				}
			}

		}
	}

	if len(errors) > 0 {
		return MetadataMultiError(errors)
	}

	return nil
}
+
// MetadataMultiError aggregates the validation errors produced by
// Metadata.ValidateAll() when one or more constraints are violated.
type MetadataMultiError []error

// Error joins the messages of all wrapped errors with "; ".
func (m MetadataMultiError) Error() string {
	msgs := make([]string, 0, len(m))
	for _, err := range m {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "; ")
}

// AllErrors returns the individual validation violations.
func (m MetadataMultiError) AllErrors() []error { return m }

// MetadataValidationError describes a single violated constraint on a
// Metadata message: the offending field, the reason, an optional underlying
// cause, and whether the value was a map key.
type MetadataValidationError struct {
	field  string
	reason string
	cause  error
	key    bool
}

// Field returns the name of the field that failed validation.
func (e MetadataValidationError) Field() string { return e.field }

// Reason returns the human-readable reason for the failure.
func (e MetadataValidationError) Reason() string { return e.reason }

// Cause returns the underlying error, if any.
func (e MetadataValidationError) Cause() error { return e.cause }

// Key reports whether the error applies to a map key.
func (e MetadataValidationError) Key() bool { return e.key }

// ErrorName returns the canonical name of this error type.
func (e MetadataValidationError) ErrorName() string { return "MetadataValidationError" }

// Error satisfies the builtin error interface.
func (e MetadataValidationError) Error() string {
	prefix := ""
	if e.key {
		prefix = "key for "
	}
	suffix := ""
	if e.cause != nil {
		suffix = fmt.Sprintf(" | caused by: %v", e.cause)
	}
	return fmt.Sprintf("invalid %sMetadata.%s: %s%s", prefix, e.field, e.reason, suffix)
}

var _ error = MetadataValidationError{}

var _ interface {
	Field() string
	Reason() string
	Key() bool
	Cause() error
	ErrorName() string
} = MetadataValidationError{}
+
// Validate checks the field values on RuntimeUInt32 with the rules defined in
// the proto definition for this message. If any rules are violated, the first
// error encountered is returned, or nil if there are no violations.
func (m *RuntimeUInt32) Validate() error {
	return m.validate(false)
}

// ValidateAll checks the field values on RuntimeUInt32 with the rules defined
// in the proto definition for this message. If any rules are violated, the
// result is a list of violation errors wrapped in RuntimeUInt32MultiError, or
// nil if none found.
func (m *RuntimeUInt32) ValidateAll() error {
	return m.validate(true)
}

// validate implements both Validate (all=false: return on the first
// violation) and ValidateAll (all=true: accumulate every violation).
// NOTE(review): generated by protoc-gen-validate — do not hand-edit.
func (m *RuntimeUInt32) validate(all bool) error {
	// A nil message is vacuously valid.
	if m == nil {
		return nil
	}

	var errors []error

	// no validation rules for DefaultValue

	// RuntimeKey must be non-empty (length measured in runes, not bytes).
	if utf8.RuneCountInString(m.GetRuntimeKey()) < 1 {
		err := RuntimeUInt32ValidationError{
			field:  "RuntimeKey",
			reason: "value length must be at least 1 runes",
		}
		if !all {
			return err
		}
		errors = append(errors, err)
	}

	if len(errors) > 0 {
		return RuntimeUInt32MultiError(errors)
	}

	return nil
}
+
// RuntimeUInt32MultiError aggregates the validation errors produced by
// RuntimeUInt32.ValidateAll() when one or more constraints are violated.
type RuntimeUInt32MultiError []error

// Error joins the messages of all wrapped errors with "; ".
func (m RuntimeUInt32MultiError) Error() string {
	msgs := make([]string, 0, len(m))
	for _, err := range m {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "; ")
}

// AllErrors returns the individual validation violations.
func (m RuntimeUInt32MultiError) AllErrors() []error { return m }

// RuntimeUInt32ValidationError describes a single violated constraint on a
// RuntimeUInt32 message: the offending field, the reason, an optional
// underlying cause, and whether the value was a map key.
type RuntimeUInt32ValidationError struct {
	field  string
	reason string
	cause  error
	key    bool
}

// Field returns the name of the field that failed validation.
func (e RuntimeUInt32ValidationError) Field() string { return e.field }

// Reason returns the human-readable reason for the failure.
func (e RuntimeUInt32ValidationError) Reason() string { return e.reason }

// Cause returns the underlying error, if any.
func (e RuntimeUInt32ValidationError) Cause() error { return e.cause }

// Key reports whether the error applies to a map key.
func (e RuntimeUInt32ValidationError) Key() bool { return e.key }

// ErrorName returns the canonical name of this error type.
func (e RuntimeUInt32ValidationError) ErrorName() string { return "RuntimeUInt32ValidationError" }

// Error satisfies the builtin error interface.
func (e RuntimeUInt32ValidationError) Error() string {
	prefix := ""
	if e.key {
		prefix = "key for "
	}
	suffix := ""
	if e.cause != nil {
		suffix = fmt.Sprintf(" | caused by: %v", e.cause)
	}
	return fmt.Sprintf("invalid %sRuntimeUInt32.%s: %s%s", prefix, e.field, e.reason, suffix)
}

var _ error = RuntimeUInt32ValidationError{}

var _ interface {
	Field() string
	Reason() string
	Key() bool
	Cause() error
	ErrorName() string
} = RuntimeUInt32ValidationError{}
+
// Validate checks the field values on RuntimePercent with the rules defined in
// the proto definition for this message. If any rules are violated, the first
// error encountered is returned, or nil if there are no violations.
func (m *RuntimePercent) Validate() error {
	return m.validate(false)
}

// ValidateAll checks the field values on RuntimePercent with the rules defined
// in the proto definition for this message. If any rules are violated, the
// result is a list of violation errors wrapped in RuntimePercentMultiError,
// or nil if none found.
func (m *RuntimePercent) ValidateAll() error {
	return m.validate(true)
}

// validate implements both Validate (all=false: return on the first
// violation) and ValidateAll (all=true: accumulate every violation).
// NOTE(review): generated by protoc-gen-validate — do not hand-edit.
func (m *RuntimePercent) validate(all bool) error {
	// A nil message is vacuously valid.
	if m == nil {
		return nil
	}

	var errors []error

	// DefaultValue has no rules of its own; if the embedded message supports
	// validation it is validated recursively.
	if all {
		switch v := interface{}(m.GetDefaultValue()).(type) {
		case interface{ ValidateAll() error }:
			if err := v.ValidateAll(); err != nil {
				errors = append(errors, RuntimePercentValidationError{
					field:  "DefaultValue",
					reason: "embedded message failed validation",
					cause:  err,
				})
			}
		case interface{ Validate() error }:
			if err := v.Validate(); err != nil {
				errors = append(errors, RuntimePercentValidationError{
					field:  "DefaultValue",
					reason: "embedded message failed validation",
					cause:  err,
				})
			}
		}
	} else if v, ok := interface{}(m.GetDefaultValue()).(interface{ Validate() error }); ok {
		if err := v.Validate(); err != nil {
			return RuntimePercentValidationError{
				field:  "DefaultValue",
				reason: "embedded message failed validation",
				cause:  err,
			}
		}
	}

	// RuntimeKey must be non-empty (length measured in runes, not bytes).
	if utf8.RuneCountInString(m.GetRuntimeKey()) < 1 {
		err := RuntimePercentValidationError{
			field:  "RuntimeKey",
			reason: "value length must be at least 1 runes",
		}
		if !all {
			return err
		}
		errors = append(errors, err)
	}

	if len(errors) > 0 {
		return RuntimePercentMultiError(errors)
	}

	return nil
}
+
// RuntimePercentMultiError aggregates the validation errors produced by
// RuntimePercent.ValidateAll() when one or more constraints are violated.
type RuntimePercentMultiError []error

// Error joins the messages of all wrapped errors with "; ".
func (m RuntimePercentMultiError) Error() string {
	msgs := make([]string, 0, len(m))
	for _, err := range m {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "; ")
}

// AllErrors returns the individual validation violations.
func (m RuntimePercentMultiError) AllErrors() []error { return m }

// RuntimePercentValidationError describes a single violated constraint on a
// RuntimePercent message: the offending field, the reason, an optional
// underlying cause, and whether the value was a map key.
type RuntimePercentValidationError struct {
	field  string
	reason string
	cause  error
	key    bool
}

// Field returns the name of the field that failed validation.
func (e RuntimePercentValidationError) Field() string { return e.field }

// Reason returns the human-readable reason for the failure.
func (e RuntimePercentValidationError) Reason() string { return e.reason }

// Cause returns the underlying error, if any.
func (e RuntimePercentValidationError) Cause() error { return e.cause }

// Key reports whether the error applies to a map key.
func (e RuntimePercentValidationError) Key() bool { return e.key }

// ErrorName returns the canonical name of this error type.
func (e RuntimePercentValidationError) ErrorName() string { return "RuntimePercentValidationError" }

// Error satisfies the builtin error interface.
func (e RuntimePercentValidationError) Error() string {
	prefix := ""
	if e.key {
		prefix = "key for "
	}
	suffix := ""
	if e.cause != nil {
		suffix = fmt.Sprintf(" | caused by: %v", e.cause)
	}
	return fmt.Sprintf("invalid %sRuntimePercent.%s: %s%s", prefix, e.field, e.reason, suffix)
}

var _ error = RuntimePercentValidationError{}

var _ interface {
	Field() string
	Reason() string
	Key() bool
	Cause() error
	ErrorName() string
} = RuntimePercentValidationError{}
+
// Validate checks the field values on RuntimeDouble with the rules defined in
// the proto definition for this message. If any rules are violated, the first
// error encountered is returned, or nil if there are no violations.
func (m *RuntimeDouble) Validate() error {
	return m.validate(false)
}

// ValidateAll checks the field values on RuntimeDouble with the rules defined
// in the proto definition for this message. If any rules are violated, the
// result is a list of violation errors wrapped in RuntimeDoubleMultiError, or
// nil if none found.
func (m *RuntimeDouble) ValidateAll() error {
	return m.validate(true)
}

// validate implements both Validate (all=false: return on the first
// violation) and ValidateAll (all=true: accumulate every violation).
// NOTE(review): generated by protoc-gen-validate — do not hand-edit.
func (m *RuntimeDouble) validate(all bool) error {
	// A nil message is vacuously valid.
	if m == nil {
		return nil
	}

	var errors []error

	// no validation rules for DefaultValue

	// RuntimeKey must be non-empty (length measured in runes, not bytes).
	if utf8.RuneCountInString(m.GetRuntimeKey()) < 1 {
		err := RuntimeDoubleValidationError{
			field:  "RuntimeKey",
			reason: "value length must be at least 1 runes",
		}
		if !all {
			return err
		}
		errors = append(errors, err)
	}

	if len(errors) > 0 {
		return RuntimeDoubleMultiError(errors)
	}

	return nil
}
+
// RuntimeDoubleMultiError aggregates the validation errors produced by
// RuntimeDouble.ValidateAll() when one or more constraints are violated.
type RuntimeDoubleMultiError []error

// Error joins the messages of all wrapped errors with "; ".
func (m RuntimeDoubleMultiError) Error() string {
	msgs := make([]string, 0, len(m))
	for _, err := range m {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "; ")
}

// AllErrors returns the individual validation violations.
func (m RuntimeDoubleMultiError) AllErrors() []error { return m }

// RuntimeDoubleValidationError describes a single violated constraint on a
// RuntimeDouble message: the offending field, the reason, an optional
// underlying cause, and whether the value was a map key.
type RuntimeDoubleValidationError struct {
	field  string
	reason string
	cause  error
	key    bool
}

// Field returns the name of the field that failed validation.
func (e RuntimeDoubleValidationError) Field() string { return e.field }

// Reason returns the human-readable reason for the failure.
func (e RuntimeDoubleValidationError) Reason() string { return e.reason }

// Cause returns the underlying error, if any.
func (e RuntimeDoubleValidationError) Cause() error { return e.cause }

// Key reports whether the error applies to a map key.
func (e RuntimeDoubleValidationError) Key() bool { return e.key }

// ErrorName returns the canonical name of this error type.
func (e RuntimeDoubleValidationError) ErrorName() string { return "RuntimeDoubleValidationError" }

// Error satisfies the builtin error interface.
func (e RuntimeDoubleValidationError) Error() string {
	prefix := ""
	if e.key {
		prefix = "key for "
	}
	suffix := ""
	if e.cause != nil {
		suffix = fmt.Sprintf(" | caused by: %v", e.cause)
	}
	return fmt.Sprintf("invalid %sRuntimeDouble.%s: %s%s", prefix, e.field, e.reason, suffix)
}

var _ error = RuntimeDoubleValidationError{}

var _ interface {
	Field() string
	Reason() string
	Key() bool
	Cause() error
	ErrorName() string
} = RuntimeDoubleValidationError{}
+
// Validate checks the field values on RuntimeFeatureFlag with the rules
// defined in the proto definition for this message. If any rules are
// violated, the first error encountered is returned, or nil if there are no violations.
func (m *RuntimeFeatureFlag) Validate() error {
	return m.validate(false)
}

// ValidateAll checks the field values on RuntimeFeatureFlag with the rules
// defined in the proto definition for this message. If any rules are
// violated, the result is a list of violation errors wrapped in
// RuntimeFeatureFlagMultiError, or nil if none found.
func (m *RuntimeFeatureFlag) ValidateAll() error {
	return m.validate(true)
}

// validate implements both Validate (all=false: return on the first
// violation) and ValidateAll (all=true: accumulate every violation).
// NOTE(review): generated by protoc-gen-validate — do not hand-edit.
func (m *RuntimeFeatureFlag) validate(all bool) error {
	// A nil message is vacuously valid.
	if m == nil {
		return nil
	}

	var errors []error

	// DefaultValue is a required field.
	if m.GetDefaultValue() == nil {
		err := RuntimeFeatureFlagValidationError{
			field:  "DefaultValue",
			reason: "value is required",
		}
		if !all {
			return err
		}
		errors = append(errors, err)
	}

	// If the embedded DefaultValue message supports validation it is
	// validated recursively.
	if all {
		switch v := interface{}(m.GetDefaultValue()).(type) {
		case interface{ ValidateAll() error }:
			if err := v.ValidateAll(); err != nil {
				errors = append(errors, RuntimeFeatureFlagValidationError{
					field:  "DefaultValue",
					reason: "embedded message failed validation",
					cause:  err,
				})
			}
		case interface{ Validate() error }:
			if err := v.Validate(); err != nil {
				errors = append(errors, RuntimeFeatureFlagValidationError{
					field:  "DefaultValue",
					reason: "embedded message failed validation",
					cause:  err,
				})
			}
		}
	} else if v, ok := interface{}(m.GetDefaultValue()).(interface{ Validate() error }); ok {
		if err := v.Validate(); err != nil {
			return RuntimeFeatureFlagValidationError{
				field:  "DefaultValue",
				reason: "embedded message failed validation",
				cause:  err,
			}
		}
	}

	// RuntimeKey must be non-empty (length measured in runes, not bytes).
	if utf8.RuneCountInString(m.GetRuntimeKey()) < 1 {
		err := RuntimeFeatureFlagValidationError{
			field:  "RuntimeKey",
			reason: "value length must be at least 1 runes",
		}
		if !all {
			return err
		}
		errors = append(errors, err)
	}

	if len(errors) > 0 {
		return RuntimeFeatureFlagMultiError(errors)
	}

	return nil
}
+
// RuntimeFeatureFlagMultiError aggregates the validation errors produced by
// RuntimeFeatureFlag.ValidateAll() when one or more constraints are violated.
type RuntimeFeatureFlagMultiError []error

// Error joins the messages of all wrapped errors with "; ".
func (m RuntimeFeatureFlagMultiError) Error() string {
	msgs := make([]string, 0, len(m))
	for _, err := range m {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "; ")
}

// AllErrors returns the individual validation violations.
func (m RuntimeFeatureFlagMultiError) AllErrors() []error { return m }

// RuntimeFeatureFlagValidationError describes a single violated constraint on
// a RuntimeFeatureFlag message: the offending field, the reason, an optional
// underlying cause, and whether the value was a map key.
type RuntimeFeatureFlagValidationError struct {
	field  string
	reason string
	cause  error
	key    bool
}

// Field returns the name of the field that failed validation.
func (e RuntimeFeatureFlagValidationError) Field() string { return e.field }

// Reason returns the human-readable reason for the failure.
func (e RuntimeFeatureFlagValidationError) Reason() string { return e.reason }

// Cause returns the underlying error, if any.
func (e RuntimeFeatureFlagValidationError) Cause() error { return e.cause }

// Key reports whether the error applies to a map key.
func (e RuntimeFeatureFlagValidationError) Key() bool { return e.key }

// ErrorName returns the canonical name of this error type.
func (e RuntimeFeatureFlagValidationError) ErrorName() string {
	return "RuntimeFeatureFlagValidationError"
}

// Error satisfies the builtin error interface.
func (e RuntimeFeatureFlagValidationError) Error() string {
	prefix := ""
	if e.key {
		prefix = "key for "
	}
	suffix := ""
	if e.cause != nil {
		suffix = fmt.Sprintf(" | caused by: %v", e.cause)
	}
	return fmt.Sprintf("invalid %sRuntimeFeatureFlag.%s: %s%s", prefix, e.field, e.reason, suffix)
}

var _ error = RuntimeFeatureFlagValidationError{}

var _ interface {
	Field() string
	Reason() string
	Key() bool
	Cause() error
	ErrorName() string
} = RuntimeFeatureFlagValidationError{}
+
+// Validate checks the field values on QueryParameter with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *QueryParameter) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on QueryParameter with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in QueryParameterMultiError,
+// or nil if none found.
+func (m *QueryParameter) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *QueryParameter) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if utf8.RuneCountInString(m.GetKey()) < 1 {
+ err := QueryParameterValidationError{
+ field: "Key",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ // no validation rules for Value
+
+ if len(errors) > 0 {
+ return QueryParameterMultiError(errors)
+ }
+
+ return nil
+}
+
+// QueryParameterMultiError is an error wrapping multiple validation errors
+// returned by QueryParameter.ValidateAll() if the designated constraints
+// aren't met.
+type QueryParameterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m QueryParameterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m QueryParameterMultiError) AllErrors() []error { return m }
+
+// QueryParameterValidationError is the validation error returned by
+// QueryParameter.Validate if the designated constraints aren't met.
+type QueryParameterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e QueryParameterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e QueryParameterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e QueryParameterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e QueryParameterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e QueryParameterValidationError) ErrorName() string { return "QueryParameterValidationError" }
+
+// Error satisfies the builtin error interface
+func (e QueryParameterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sQueryParameter.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = QueryParameterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = QueryParameterValidationError{}
+
+// Validate checks the field values on HeaderValue with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *HeaderValue) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on HeaderValue with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in HeaderValueMultiError, or
+// nil if none found.
+func (m *HeaderValue) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *HeaderValue) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if utf8.RuneCountInString(m.GetKey()) < 1 {
+ err := HeaderValueValidationError{
+ field: "Key",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(m.GetKey()) > 16384 {
+ err := HeaderValueValidationError{
+ field: "Key",
+ reason: "value length must be at most 16384 bytes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if !_HeaderValue_Key_Pattern.MatchString(m.GetKey()) {
+ err := HeaderValueValidationError{
+ field: "Key",
+ reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(m.GetValue()) > 16384 {
+ err := HeaderValueValidationError{
+ field: "Value",
+ reason: "value length must be at most 16384 bytes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if !_HeaderValue_Value_Pattern.MatchString(m.GetValue()) {
+ err := HeaderValueValidationError{
+ field: "Value",
+ reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if l := len(m.GetRawValue()); l < 0 || l > 16384 {
+ err := HeaderValueValidationError{
+ field: "RawValue",
+ reason: "value length must be between 0 and 16384 bytes, inclusive",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return HeaderValueMultiError(errors)
+ }
+
+ return nil
+}
+
+// HeaderValueMultiError is an error wrapping multiple validation errors
+// returned by HeaderValue.ValidateAll() if the designated constraints aren't met.
+type HeaderValueMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m HeaderValueMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m HeaderValueMultiError) AllErrors() []error { return m }
+
+// HeaderValueValidationError is the validation error returned by
+// HeaderValue.Validate if the designated constraints aren't met.
+type HeaderValueValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HeaderValueValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HeaderValueValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HeaderValueValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HeaderValueValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HeaderValueValidationError) ErrorName() string { return "HeaderValueValidationError" }
+
+// Error satisfies the builtin error interface
+func (e HeaderValueValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHeaderValue.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HeaderValueValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HeaderValueValidationError{}
+
+var _HeaderValue_Key_Pattern = regexp.MustCompile("^[^\x00\n\r]*$")
+
+var _HeaderValue_Value_Pattern = regexp.MustCompile("^[^\x00\n\r]*$")
+
+// Validate checks the field values on HeaderValueOption with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *HeaderValueOption) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on HeaderValueOption with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// HeaderValueOptionMultiError, or nil if none found.
+func (m *HeaderValueOption) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *HeaderValueOption) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if m.GetHeader() == nil {
+ err := HeaderValueOptionValidationError{
+ field: "Header",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetHeader()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HeaderValueOptionValidationError{
+ field: "Header",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HeaderValueOptionValidationError{
+ field: "Header",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetHeader()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HeaderValueOptionValidationError{
+ field: "Header",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetAppend()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HeaderValueOptionValidationError{
+ field: "Append",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HeaderValueOptionValidationError{
+ field: "Append",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetAppend()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HeaderValueOptionValidationError{
+ field: "Append",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if _, ok := HeaderValueOption_HeaderAppendAction_name[int32(m.GetAppendAction())]; !ok {
+ err := HeaderValueOptionValidationError{
+ field: "AppendAction",
+ reason: "value must be one of the defined enum values",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ // no validation rules for KeepEmptyValue
+
+ if len(errors) > 0 {
+ return HeaderValueOptionMultiError(errors)
+ }
+
+ return nil
+}
+
+// HeaderValueOptionMultiError is an error wrapping multiple validation errors
+// returned by HeaderValueOption.ValidateAll() if the designated constraints
+// aren't met.
+type HeaderValueOptionMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m HeaderValueOptionMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m HeaderValueOptionMultiError) AllErrors() []error { return m }
+
+// HeaderValueOptionValidationError is the validation error returned by
+// HeaderValueOption.Validate if the designated constraints aren't met.
+type HeaderValueOptionValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HeaderValueOptionValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HeaderValueOptionValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HeaderValueOptionValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HeaderValueOptionValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HeaderValueOptionValidationError) ErrorName() string {
+ return "HeaderValueOptionValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e HeaderValueOptionValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHeaderValueOption.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HeaderValueOptionValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HeaderValueOptionValidationError{}
+
+// Validate checks the field values on HeaderMap with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *HeaderMap) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on HeaderMap with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in HeaderMapMultiError, or nil
+// if none found.
+func (m *HeaderMap) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *HeaderMap) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetHeaders() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HeaderMapValidationError{
+ field: fmt.Sprintf("Headers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HeaderMapValidationError{
+ field: fmt.Sprintf("Headers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HeaderMapValidationError{
+ field: fmt.Sprintf("Headers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return HeaderMapMultiError(errors)
+ }
+
+ return nil
+}
+
+// HeaderMapMultiError is an error wrapping multiple validation errors returned
+// by HeaderMap.ValidateAll() if the designated constraints aren't met.
+type HeaderMapMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m HeaderMapMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m HeaderMapMultiError) AllErrors() []error { return m }
+
+// HeaderMapValidationError is the validation error returned by
+// HeaderMap.Validate if the designated constraints aren't met.
+type HeaderMapValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HeaderMapValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HeaderMapValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HeaderMapValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HeaderMapValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HeaderMapValidationError) ErrorName() string { return "HeaderMapValidationError" }
+
+// Error satisfies the builtin error interface
+func (e HeaderMapValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHeaderMap.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HeaderMapValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HeaderMapValidationError{}
+
+// Validate checks the field values on WatchedDirectory with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *WatchedDirectory) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on WatchedDirectory with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// WatchedDirectoryMultiError, or nil if none found.
+func (m *WatchedDirectory) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *WatchedDirectory) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if utf8.RuneCountInString(m.GetPath()) < 1 {
+ err := WatchedDirectoryValidationError{
+ field: "Path",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return WatchedDirectoryMultiError(errors)
+ }
+
+ return nil
+}
+
+// WatchedDirectoryMultiError is an error wrapping multiple validation errors
+// returned by WatchedDirectory.ValidateAll() if the designated constraints
+// aren't met.
+type WatchedDirectoryMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m WatchedDirectoryMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m WatchedDirectoryMultiError) AllErrors() []error { return m }
+
+// WatchedDirectoryValidationError is the validation error returned by
+// WatchedDirectory.Validate if the designated constraints aren't met.
+type WatchedDirectoryValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e WatchedDirectoryValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e WatchedDirectoryValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e WatchedDirectoryValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e WatchedDirectoryValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e WatchedDirectoryValidationError) ErrorName() string { return "WatchedDirectoryValidationError" }
+
+// Error satisfies the builtin error interface
+func (e WatchedDirectoryValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sWatchedDirectory.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = WatchedDirectoryValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = WatchedDirectoryValidationError{}
+
+// Validate checks the field values on DataSource with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *DataSource) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on DataSource with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in DataSourceMultiError, or
+// nil if none found.
+func (m *DataSource) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *DataSource) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ oneofSpecifierPresent := false
+ switch v := m.Specifier.(type) {
+ case *DataSource_Filename:
+ if v == nil {
+ err := DataSourceValidationError{
+ field: "Specifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofSpecifierPresent = true
+
+ if utf8.RuneCountInString(m.GetFilename()) < 1 {
+ err := DataSourceValidationError{
+ field: "Filename",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ case *DataSource_InlineBytes:
+ if v == nil {
+ err := DataSourceValidationError{
+ field: "Specifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofSpecifierPresent = true
+ // no validation rules for InlineBytes
+ case *DataSource_InlineString:
+ if v == nil {
+ err := DataSourceValidationError{
+ field: "Specifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofSpecifierPresent = true
+ // no validation rules for InlineString
+ case *DataSource_EnvironmentVariable:
+ if v == nil {
+ err := DataSourceValidationError{
+ field: "Specifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofSpecifierPresent = true
+
+ if utf8.RuneCountInString(m.GetEnvironmentVariable()) < 1 {
+ err := DataSourceValidationError{
+ field: "EnvironmentVariable",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofSpecifierPresent {
+ err := DataSourceValidationError{
+ field: "Specifier",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return DataSourceMultiError(errors)
+ }
+
+ return nil
+}
+
+// DataSourceMultiError is an error wrapping multiple validation errors
+// returned by DataSource.ValidateAll() if the designated constraints aren't met.
+type DataSourceMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m DataSourceMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m DataSourceMultiError) AllErrors() []error { return m }
+
+// DataSourceValidationError is the validation error returned by
+// DataSource.Validate if the designated constraints aren't met.
+type DataSourceValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e DataSourceValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e DataSourceValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e DataSourceValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e DataSourceValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e DataSourceValidationError) ErrorName() string { return "DataSourceValidationError" }
+
+// Error satisfies the builtin error interface
+func (e DataSourceValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sDataSource.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = DataSourceValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = DataSourceValidationError{}
+
+// Validate checks the field values on RetryPolicy with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *RetryPolicy) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on RetryPolicy with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in RetryPolicyMultiError, or
+// nil if none found.
+func (m *RetryPolicy) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *RetryPolicy) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetRetryBackOff()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RetryPolicyValidationError{
+ field: "RetryBackOff",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RetryPolicyValidationError{
+ field: "RetryBackOff",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetRetryBackOff()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RetryPolicyValidationError{
+ field: "RetryBackOff",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetNumRetries()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RetryPolicyValidationError{
+ field: "NumRetries",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RetryPolicyValidationError{
+ field: "NumRetries",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetNumRetries()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RetryPolicyValidationError{
+ field: "NumRetries",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return RetryPolicyMultiError(errors)
+ }
+
+ return nil
+}
+
+// RetryPolicyMultiError is an error wrapping multiple validation errors
+// returned by RetryPolicy.ValidateAll() if the designated constraints aren't met.
+type RetryPolicyMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m RetryPolicyMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m RetryPolicyMultiError) AllErrors() []error { return m }
+
+// RetryPolicyValidationError is the validation error returned by
+// RetryPolicy.Validate if the designated constraints aren't met.
+type RetryPolicyValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RetryPolicyValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RetryPolicyValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RetryPolicyValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RetryPolicyValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RetryPolicyValidationError) ErrorName() string { return "RetryPolicyValidationError" }
+
+// Error satisfies the builtin error interface
+func (e RetryPolicyValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRetryPolicy.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RetryPolicyValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RetryPolicyValidationError{}
+
+// Validate checks the field values on RemoteDataSource with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *RemoteDataSource) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on RemoteDataSource with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// RemoteDataSourceMultiError, or nil if none found.
+func (m *RemoteDataSource) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *RemoteDataSource) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if m.GetHttpUri() == nil {
+ err := RemoteDataSourceValidationError{
+ field: "HttpUri",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetHttpUri()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RemoteDataSourceValidationError{
+ field: "HttpUri",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RemoteDataSourceValidationError{
+ field: "HttpUri",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetHttpUri()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RemoteDataSourceValidationError{
+ field: "HttpUri",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if utf8.RuneCountInString(m.GetSha256()) < 1 {
+ err := RemoteDataSourceValidationError{
+ field: "Sha256",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetRetryPolicy()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RemoteDataSourceValidationError{
+ field: "RetryPolicy",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RemoteDataSourceValidationError{
+ field: "RetryPolicy",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetRetryPolicy()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RemoteDataSourceValidationError{
+ field: "RetryPolicy",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return RemoteDataSourceMultiError(errors)
+ }
+
+ return nil
+}
+
+// RemoteDataSourceMultiError is an error wrapping multiple validation errors
+// returned by RemoteDataSource.ValidateAll() if the designated constraints
+// aren't met.
+type RemoteDataSourceMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m RemoteDataSourceMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m RemoteDataSourceMultiError) AllErrors() []error { return m }
+
+// RemoteDataSourceValidationError is the validation error returned by
+// RemoteDataSource.Validate if the designated constraints aren't met.
+type RemoteDataSourceValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RemoteDataSourceValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RemoteDataSourceValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RemoteDataSourceValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RemoteDataSourceValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RemoteDataSourceValidationError) ErrorName() string { return "RemoteDataSourceValidationError" }
+
+// Error satisfies the builtin error interface
+func (e RemoteDataSourceValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRemoteDataSource.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RemoteDataSourceValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RemoteDataSourceValidationError{}
+
+// Validate checks the field values on AsyncDataSource with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *AsyncDataSource) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on AsyncDataSource with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// AsyncDataSourceMultiError, or nil if none found.
+func (m *AsyncDataSource) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *AsyncDataSource) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ oneofSpecifierPresent := false
+ switch v := m.Specifier.(type) {
+ case *AsyncDataSource_Local:
+ if v == nil {
+ err := AsyncDataSourceValidationError{
+ field: "Specifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetLocal()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AsyncDataSourceValidationError{
+ field: "Local",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AsyncDataSourceValidationError{
+ field: "Local",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLocal()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AsyncDataSourceValidationError{
+ field: "Local",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *AsyncDataSource_Remote:
+ if v == nil {
+ err := AsyncDataSourceValidationError{
+ field: "Specifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetRemote()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AsyncDataSourceValidationError{
+ field: "Remote",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AsyncDataSourceValidationError{
+ field: "Remote",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetRemote()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AsyncDataSourceValidationError{
+ field: "Remote",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofSpecifierPresent {
+ err := AsyncDataSourceValidationError{
+ field: "Specifier",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return AsyncDataSourceMultiError(errors)
+ }
+
+ return nil
+}
+
+// AsyncDataSourceMultiError is an error wrapping multiple validation errors
+// returned by AsyncDataSource.ValidateAll() if the designated constraints
+// aren't met.
+type AsyncDataSourceMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m AsyncDataSourceMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m AsyncDataSourceMultiError) AllErrors() []error { return m }
+
+// AsyncDataSourceValidationError is the validation error returned by
+// AsyncDataSource.Validate if the designated constraints aren't met.
+type AsyncDataSourceValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e AsyncDataSourceValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e AsyncDataSourceValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e AsyncDataSourceValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e AsyncDataSourceValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e AsyncDataSourceValidationError) ErrorName() string { return "AsyncDataSourceValidationError" }
+
+// Error satisfies the builtin error interface
+func (e AsyncDataSourceValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sAsyncDataSource.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = AsyncDataSourceValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = AsyncDataSourceValidationError{}
+
+// Validate checks the field values on TransportSocket with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *TransportSocket) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on TransportSocket with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// TransportSocketMultiError, or nil if none found.
+func (m *TransportSocket) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *TransportSocket) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if utf8.RuneCountInString(m.GetName()) < 1 {
+ err := TransportSocketValidationError{
+ field: "Name",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ switch v := m.ConfigType.(type) {
+ case *TransportSocket_TypedConfig:
+ if v == nil {
+ err := TransportSocketValidationError{
+ field: "ConfigType",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetTypedConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, TransportSocketValidationError{
+ field: "TypedConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, TransportSocketValidationError{
+ field: "TypedConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return TransportSocketValidationError{
+ field: "TypedConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+
+ if len(errors) > 0 {
+ return TransportSocketMultiError(errors)
+ }
+
+ return nil
+}
+
+// TransportSocketMultiError is an error wrapping multiple validation errors
+// returned by TransportSocket.ValidateAll() if the designated constraints
+// aren't met.
+type TransportSocketMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m TransportSocketMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m TransportSocketMultiError) AllErrors() []error { return m }
+
+// TransportSocketValidationError is the validation error returned by
+// TransportSocket.Validate if the designated constraints aren't met.
+type TransportSocketValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e TransportSocketValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e TransportSocketValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e TransportSocketValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e TransportSocketValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e TransportSocketValidationError) ErrorName() string { return "TransportSocketValidationError" }
+
+// Error satisfies the builtin error interface
+func (e TransportSocketValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sTransportSocket.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = TransportSocketValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = TransportSocketValidationError{}
+
+// Validate checks the field values on RuntimeFractionalPercent with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *RuntimeFractionalPercent) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on RuntimeFractionalPercent with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// RuntimeFractionalPercentMultiError, or nil if none found.
+func (m *RuntimeFractionalPercent) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *RuntimeFractionalPercent) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if m.GetDefaultValue() == nil {
+ err := RuntimeFractionalPercentValidationError{
+ field: "DefaultValue",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetDefaultValue()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RuntimeFractionalPercentValidationError{
+ field: "DefaultValue",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RuntimeFractionalPercentValidationError{
+ field: "DefaultValue",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetDefaultValue()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RuntimeFractionalPercentValidationError{
+ field: "DefaultValue",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for RuntimeKey
+
+ if len(errors) > 0 {
+ return RuntimeFractionalPercentMultiError(errors)
+ }
+
+ return nil
+}
+
+// RuntimeFractionalPercentMultiError is an error wrapping multiple validation
+// errors returned by RuntimeFractionalPercent.ValidateAll() if the designated
+// constraints aren't met.
+type RuntimeFractionalPercentMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m RuntimeFractionalPercentMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m RuntimeFractionalPercentMultiError) AllErrors() []error { return m }
+
+// RuntimeFractionalPercentValidationError is the validation error returned by
+// RuntimeFractionalPercent.Validate if the designated constraints aren't met.
+type RuntimeFractionalPercentValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RuntimeFractionalPercentValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RuntimeFractionalPercentValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RuntimeFractionalPercentValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RuntimeFractionalPercentValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RuntimeFractionalPercentValidationError) ErrorName() string {
+ return "RuntimeFractionalPercentValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e RuntimeFractionalPercentValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRuntimeFractionalPercent.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RuntimeFractionalPercentValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RuntimeFractionalPercentValidationError{}
+
+// Validate checks the field values on ControlPlane with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *ControlPlane) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ControlPlane with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in ControlPlaneMultiError, or
+// nil if none found.
+func (m *ControlPlane) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ControlPlane) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Identifier
+
+ if len(errors) > 0 {
+ return ControlPlaneMultiError(errors)
+ }
+
+ return nil
+}
+
+// ControlPlaneMultiError is an error wrapping multiple validation errors
+// returned by ControlPlane.ValidateAll() if the designated constraints aren't met.
+type ControlPlaneMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ControlPlaneMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ControlPlaneMultiError) AllErrors() []error { return m }
+
+// ControlPlaneValidationError is the validation error returned by
+// ControlPlane.Validate if the designated constraints aren't met.
+type ControlPlaneValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ControlPlaneValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ControlPlaneValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ControlPlaneValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ControlPlaneValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ControlPlaneValidationError) ErrorName() string { return "ControlPlaneValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ControlPlaneValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sControlPlane.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ControlPlaneValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ControlPlaneValidationError{}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/config_source.pb.go b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/config_source.pb.go
new file mode 100644
index 000000000..0ba48f390
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/config_source.pb.go
@@ -0,0 +1,1203 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.23.0
+// protoc v4.23.1
+// source: envoy/config/core/v3/config_source.proto
+
+package corev3
+
+import (
+ _ "github.com/cilium/proxy/go/envoy/annotations"
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ v3 "github.com/cncf/xds/go/xds/core/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// xDS API and non-xDS services version. This is used to describe both resource and transport
+// protocol versions (in distinct configuration fields).
+type ApiVersion int32
+
+const (
+ // When not specified, we assume v2, to ease migration to Envoy's stable API
+ // versioning. If a client does not support v2 (e.g. due to deprecation), this
+ // is an invalid value.
+ //
+ // Deprecated: Do not use.
+ ApiVersion_AUTO ApiVersion = 0
+ // Use xDS v2 API.
+ //
+ // Deprecated: Do not use.
+ ApiVersion_V2 ApiVersion = 1
+ // Use xDS v3 API.
+ ApiVersion_V3 ApiVersion = 2
+)
+
+// Enum value maps for ApiVersion.
+var (
+ ApiVersion_name = map[int32]string{
+ 0: "AUTO",
+ 1: "V2",
+ 2: "V3",
+ }
+ ApiVersion_value = map[string]int32{
+ "AUTO": 0,
+ "V2": 1,
+ "V3": 2,
+ }
+)
+
+func (x ApiVersion) Enum() *ApiVersion {
+ p := new(ApiVersion)
+ *p = x
+ return p
+}
+
+func (x ApiVersion) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ApiVersion) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_core_v3_config_source_proto_enumTypes[0].Descriptor()
+}
+
+func (ApiVersion) Type() protoreflect.EnumType {
+ return &file_envoy_config_core_v3_config_source_proto_enumTypes[0]
+}
+
+func (x ApiVersion) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ApiVersion.Descriptor instead.
+func (ApiVersion) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_config_source_proto_rawDescGZIP(), []int{0}
+}
+
+// APIs may be fetched via either REST or gRPC.
+type ApiConfigSource_ApiType int32
+
+const (
+ // Ideally this would be 'reserved 0' but one can't reserve the default
+ // value. Instead we throw an exception if this is ever used.
+ //
+ // Deprecated: Do not use.
+ ApiConfigSource_DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE ApiConfigSource_ApiType = 0
+ // REST-JSON v2 API. The `canonical JSON encoding
+ // `_ for
+ // the v2 protos is used.
+ ApiConfigSource_REST ApiConfigSource_ApiType = 1
+ // SotW gRPC service.
+ ApiConfigSource_GRPC ApiConfigSource_ApiType = 2
+ // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response}
+ // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state
+ // with every update, the xDS server only sends what has changed since the last update.
+ ApiConfigSource_DELTA_GRPC ApiConfigSource_ApiType = 3
+ // SotW xDS gRPC with ADS. All resources which resolve to this configuration source will be
+ // multiplexed on a single connection to an ADS endpoint.
+ // [#not-implemented-hide:]
+ ApiConfigSource_AGGREGATED_GRPC ApiConfigSource_ApiType = 5
+ // Delta xDS gRPC with ADS. All resources which resolve to this configuration source will be
+ // multiplexed on a single connection to an ADS endpoint.
+ // [#not-implemented-hide:]
+ ApiConfigSource_AGGREGATED_DELTA_GRPC ApiConfigSource_ApiType = 6
+)
+
+// Enum value maps for ApiConfigSource_ApiType.
+var (
+ ApiConfigSource_ApiType_name = map[int32]string{
+ 0: "DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE",
+ 1: "REST",
+ 2: "GRPC",
+ 3: "DELTA_GRPC",
+ 5: "AGGREGATED_GRPC",
+ 6: "AGGREGATED_DELTA_GRPC",
+ }
+ ApiConfigSource_ApiType_value = map[string]int32{
+ "DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE": 0,
+ "REST": 1,
+ "GRPC": 2,
+ "DELTA_GRPC": 3,
+ "AGGREGATED_GRPC": 5,
+ "AGGREGATED_DELTA_GRPC": 6,
+ }
+)
+
+func (x ApiConfigSource_ApiType) Enum() *ApiConfigSource_ApiType {
+ p := new(ApiConfigSource_ApiType)
+ *p = x
+ return p
+}
+
+func (x ApiConfigSource_ApiType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ApiConfigSource_ApiType) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_core_v3_config_source_proto_enumTypes[1].Descriptor()
+}
+
+func (ApiConfigSource_ApiType) Type() protoreflect.EnumType {
+ return &file_envoy_config_core_v3_config_source_proto_enumTypes[1]
+}
+
+func (x ApiConfigSource_ApiType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ApiConfigSource_ApiType.Descriptor instead.
+func (ApiConfigSource_ApiType) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_config_source_proto_rawDescGZIP(), []int{0, 0}
+}
+
+// API configuration source. This identifies the API type and cluster that Envoy
+// will use to fetch an xDS API.
+// [#next-free-field: 10]
+type ApiConfigSource struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // API type (gRPC, REST, delta gRPC)
+ ApiType ApiConfigSource_ApiType `protobuf:"varint,1,opt,name=api_type,json=apiType,proto3,enum=envoy.config.core.v3.ApiConfigSource_ApiType" json:"api_type,omitempty"`
+ // API version for xDS transport protocol. This describes the xDS gRPC/REST
+ // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire.
+ TransportApiVersion ApiVersion `protobuf:"varint,8,opt,name=transport_api_version,json=transportApiVersion,proto3,enum=envoy.config.core.v3.ApiVersion" json:"transport_api_version,omitempty"`
+ // Cluster names should be used only with REST. If > 1
+ // cluster is defined, clusters will be cycled through if any kind of failure
+ // occurs.
+ //
+ // .. note::
+ //
+ // The cluster with name ``cluster_name`` must be statically defined and its
+ // type must not be ``EDS``.
+ ClusterNames []string `protobuf:"bytes,2,rep,name=cluster_names,json=clusterNames,proto3" json:"cluster_names,omitempty"`
+ // Multiple gRPC services be provided for GRPC. If > 1 cluster is defined,
+ // services will be cycled through if any kind of failure occurs.
+ GrpcServices []*GrpcService `protobuf:"bytes,4,rep,name=grpc_services,json=grpcServices,proto3" json:"grpc_services,omitempty"`
+ // For REST APIs, the delay between successive polls.
+ RefreshDelay *durationpb.Duration `protobuf:"bytes,3,opt,name=refresh_delay,json=refreshDelay,proto3" json:"refresh_delay,omitempty"`
+ // For REST APIs, the request timeout. If not set, a default value of 1s will be used.
+ RequestTimeout *durationpb.Duration `protobuf:"bytes,5,opt,name=request_timeout,json=requestTimeout,proto3" json:"request_timeout,omitempty"`
+ // For GRPC APIs, the rate limit settings. If present, discovery requests made by Envoy will be
+ // rate limited.
+ RateLimitSettings *RateLimitSettings `protobuf:"bytes,6,opt,name=rate_limit_settings,json=rateLimitSettings,proto3" json:"rate_limit_settings,omitempty"`
+ // Skip the node identifier in subsequent discovery requests for streaming gRPC config types.
+ SetNodeOnFirstMessageOnly bool `protobuf:"varint,7,opt,name=set_node_on_first_message_only,json=setNodeOnFirstMessageOnly,proto3" json:"set_node_on_first_message_only,omitempty"`
+ // A list of config validators that will be executed when a new update is
+ // received from the ApiConfigSource. Note that each validator handles a
+ // specific xDS service type, and only the validators corresponding to the
+ // type url (in “:ref: DiscoveryResponse“ or “:ref: DeltaDiscoveryResponse“)
+ // will be invoked.
+ // If the validator returns false or throws an exception, the config will be rejected by
+ // the client, and a NACK will be sent.
+ // [#extension-category: envoy.config.validators]
+ ConfigValidators []*TypedExtensionConfig `protobuf:"bytes,9,rep,name=config_validators,json=configValidators,proto3" json:"config_validators,omitempty"`
+}
+
+func (x *ApiConfigSource) Reset() {
+ *x = ApiConfigSource{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ApiConfigSource) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ApiConfigSource) ProtoMessage() {}
+
+func (x *ApiConfigSource) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ApiConfigSource.ProtoReflect.Descriptor instead.
+func (*ApiConfigSource) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_config_source_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ApiConfigSource) GetApiType() ApiConfigSource_ApiType {
+ if x != nil {
+ return x.ApiType
+ }
+ return ApiConfigSource_DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE
+}
+
+func (x *ApiConfigSource) GetTransportApiVersion() ApiVersion {
+ if x != nil {
+ return x.TransportApiVersion
+ }
+ return ApiVersion_AUTO
+}
+
+func (x *ApiConfigSource) GetClusterNames() []string {
+ if x != nil {
+ return x.ClusterNames
+ }
+ return nil
+}
+
+func (x *ApiConfigSource) GetGrpcServices() []*GrpcService {
+ if x != nil {
+ return x.GrpcServices
+ }
+ return nil
+}
+
+func (x *ApiConfigSource) GetRefreshDelay() *durationpb.Duration {
+ if x != nil {
+ return x.RefreshDelay
+ }
+ return nil
+}
+
+func (x *ApiConfigSource) GetRequestTimeout() *durationpb.Duration {
+ if x != nil {
+ return x.RequestTimeout
+ }
+ return nil
+}
+
+func (x *ApiConfigSource) GetRateLimitSettings() *RateLimitSettings {
+ if x != nil {
+ return x.RateLimitSettings
+ }
+ return nil
+}
+
+func (x *ApiConfigSource) GetSetNodeOnFirstMessageOnly() bool {
+ if x != nil {
+ return x.SetNodeOnFirstMessageOnly
+ }
+ return false
+}
+
+func (x *ApiConfigSource) GetConfigValidators() []*TypedExtensionConfig {
+ if x != nil {
+ return x.ConfigValidators
+ }
+ return nil
+}
+
+// Aggregated Discovery Service (ADS) options. This is currently empty, but when
+// set in :ref:`ConfigSource ` can be used to
+// specify that ADS is to be used.
+type AggregatedConfigSource struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *AggregatedConfigSource) Reset() {
+ *x = AggregatedConfigSource{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AggregatedConfigSource) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AggregatedConfigSource) ProtoMessage() {}
+
+func (x *AggregatedConfigSource) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AggregatedConfigSource.ProtoReflect.Descriptor instead.
+func (*AggregatedConfigSource) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_config_source_proto_rawDescGZIP(), []int{1}
+}
+
+// [#not-implemented-hide:]
+// Self-referencing config source options. This is currently empty, but when
+// set in :ref:`ConfigSource ` can be used to
+// specify that other data can be obtained from the same server.
+type SelfConfigSource struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // API version for xDS transport protocol. This describes the xDS gRPC/REST
+ // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire.
+ TransportApiVersion ApiVersion `protobuf:"varint,1,opt,name=transport_api_version,json=transportApiVersion,proto3,enum=envoy.config.core.v3.ApiVersion" json:"transport_api_version,omitempty"`
+}
+
+func (x *SelfConfigSource) Reset() {
+ *x = SelfConfigSource{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SelfConfigSource) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SelfConfigSource) ProtoMessage() {}
+
+func (x *SelfConfigSource) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SelfConfigSource.ProtoReflect.Descriptor instead.
+func (*SelfConfigSource) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_config_source_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *SelfConfigSource) GetTransportApiVersion() ApiVersion {
+ if x != nil {
+ return x.TransportApiVersion
+ }
+ return ApiVersion_AUTO
+}
+
+// Rate Limit settings to be applied for discovery requests made by Envoy.
+type RateLimitSettings struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Maximum number of tokens to be used for rate limiting discovery request calls. If not set, a
+ // default value of 100 will be used.
+ MaxTokens *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=max_tokens,json=maxTokens,proto3" json:"max_tokens,omitempty"`
+ // Rate at which tokens will be filled per second. If not set, a default fill rate of 10 tokens
+ // per second will be used.
+ FillRate *wrapperspb.DoubleValue `protobuf:"bytes,2,opt,name=fill_rate,json=fillRate,proto3" json:"fill_rate,omitempty"`
+}
+
+func (x *RateLimitSettings) Reset() {
+ *x = RateLimitSettings{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RateLimitSettings) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RateLimitSettings) ProtoMessage() {}
+
+func (x *RateLimitSettings) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RateLimitSettings.ProtoReflect.Descriptor instead.
+func (*RateLimitSettings) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_config_source_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *RateLimitSettings) GetMaxTokens() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.MaxTokens
+ }
+ return nil
+}
+
+func (x *RateLimitSettings) GetFillRate() *wrapperspb.DoubleValue {
+ if x != nil {
+ return x.FillRate
+ }
+ return nil
+}
+
+// Local filesystem path configuration source.
+type PathConfigSource struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Path on the filesystem to source and watch for configuration updates.
+ // When sourcing configuration for a :ref:`secret `,
+ // the certificate and key files are also watched for updates.
+ //
+ // .. note::
+ //
+ // The path to the source must exist at config load time.
+ //
+ // .. note::
+ //
+ // If ``watched_directory`` is *not* configured, Envoy will watch the file path for *moves*.
+ // This is because in general only moves are atomic. The same method of swapping files as is
+ // demonstrated in the :ref:`runtime documentation ` can be
+ // used here also. If ``watched_directory`` is configured, no watch will be placed directly on
+ // this path. Instead, the configured ``watched_directory`` will be used to trigger reloads of
+ // this path. This is required in certain deployment scenarios. See below for more information.
+ Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+ // If configured, this directory will be watched for *moves*. When an entry in this directory is
+ // moved to, the “path“ will be reloaded. This is required in certain deployment scenarios.
+ //
+ // Specifically, if trying to load an xDS resource using a
+ // `Kubernetes ConfigMap `_, the
+ // following configuration might be used:
+ // 1. Store xds.yaml inside a ConfigMap.
+ // 2. Mount the ConfigMap to “/config_map/xds“
+ // 3. Configure path “/config_map/xds/xds.yaml“
+ // 4. Configure watched directory “/config_map/xds“
+ //
+ // The above configuration will ensure that Envoy watches the owning directory for moves which is
+ // required due to how Kubernetes manages ConfigMap symbolic links during atomic updates.
+ WatchedDirectory *WatchedDirectory `protobuf:"bytes,2,opt,name=watched_directory,json=watchedDirectory,proto3" json:"watched_directory,omitempty"`
+}
+
+func (x *PathConfigSource) Reset() {
+ *x = PathConfigSource{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PathConfigSource) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PathConfigSource) ProtoMessage() {}
+
+func (x *PathConfigSource) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PathConfigSource.ProtoReflect.Descriptor instead.
+func (*PathConfigSource) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_config_source_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *PathConfigSource) GetPath() string {
+ if x != nil {
+ return x.Path
+ }
+ return ""
+}
+
+func (x *PathConfigSource) GetWatchedDirectory() *WatchedDirectory {
+ if x != nil {
+ return x.WatchedDirectory
+ }
+ return nil
+}
+
+// Configuration for :ref:`listeners `, :ref:`clusters
+// `, :ref:`routes
+// `, :ref:`endpoints
+// ` etc. may either be sourced from the
+// filesystem or from an xDS API source. Filesystem configs are watched with
+// inotify for updates.
+// [#next-free-field: 9]
+type ConfigSource struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Authorities that this config source may be used for. An authority specified in a xdstp:// URL
+ // is resolved to a “ConfigSource“ prior to configuration fetch. This field provides the
+ // association between authority name and configuration source.
+ // [#not-implemented-hide:]
+ Authorities []*v3.Authority `protobuf:"bytes,7,rep,name=authorities,proto3" json:"authorities,omitempty"`
+ // Types that are assignable to ConfigSourceSpecifier:
+ //
+ // *ConfigSource_Path
+ // *ConfigSource_PathConfigSource
+ // *ConfigSource_ApiConfigSource
+ // *ConfigSource_Ads
+ // *ConfigSource_Self
+ ConfigSourceSpecifier isConfigSource_ConfigSourceSpecifier `protobuf_oneof:"config_source_specifier"`
+ // When this timeout is specified, Envoy will wait no longer than the specified time for first
+ // config response on this xDS subscription during the :ref:`initialization process
+ // `. After reaching the timeout, Envoy will move to the next
+ // initialization phase, even if the first config is not delivered yet. The timer is activated
+ // when the xDS API subscription starts, and is disarmed on first config update or on error. 0
+ // means no timeout - Envoy will wait indefinitely for the first xDS config (unless another
+ // timeout applies). The default is 15s.
+ InitialFetchTimeout *durationpb.Duration `protobuf:"bytes,4,opt,name=initial_fetch_timeout,json=initialFetchTimeout,proto3" json:"initial_fetch_timeout,omitempty"`
+ // API version for xDS resources. This implies the type URLs that the client
+ // will request for resources and the resource type that the client will in
+ // turn expect to be delivered.
+ ResourceApiVersion ApiVersion `protobuf:"varint,6,opt,name=resource_api_version,json=resourceApiVersion,proto3,enum=envoy.config.core.v3.ApiVersion" json:"resource_api_version,omitempty"`
+}
+
+func (x *ConfigSource) Reset() {
+ *x = ConfigSource{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ConfigSource) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ConfigSource) ProtoMessage() {}
+
+func (x *ConfigSource) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ConfigSource.ProtoReflect.Descriptor instead.
+func (*ConfigSource) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_config_source_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *ConfigSource) GetAuthorities() []*v3.Authority {
+ if x != nil {
+ return x.Authorities
+ }
+ return nil
+}
+
+func (m *ConfigSource) GetConfigSourceSpecifier() isConfigSource_ConfigSourceSpecifier {
+ if m != nil {
+ return m.ConfigSourceSpecifier
+ }
+ return nil
+}
+
+// Deprecated: Do not use.
+func (x *ConfigSource) GetPath() string {
+ if x, ok := x.GetConfigSourceSpecifier().(*ConfigSource_Path); ok {
+ return x.Path
+ }
+ return ""
+}
+
+func (x *ConfigSource) GetPathConfigSource() *PathConfigSource {
+ if x, ok := x.GetConfigSourceSpecifier().(*ConfigSource_PathConfigSource); ok {
+ return x.PathConfigSource
+ }
+ return nil
+}
+
+func (x *ConfigSource) GetApiConfigSource() *ApiConfigSource {
+ if x, ok := x.GetConfigSourceSpecifier().(*ConfigSource_ApiConfigSource); ok {
+ return x.ApiConfigSource
+ }
+ return nil
+}
+
+func (x *ConfigSource) GetAds() *AggregatedConfigSource {
+ if x, ok := x.GetConfigSourceSpecifier().(*ConfigSource_Ads); ok {
+ return x.Ads
+ }
+ return nil
+}
+
+func (x *ConfigSource) GetSelf() *SelfConfigSource {
+ if x, ok := x.GetConfigSourceSpecifier().(*ConfigSource_Self); ok {
+ return x.Self
+ }
+ return nil
+}
+
+func (x *ConfigSource) GetInitialFetchTimeout() *durationpb.Duration {
+ if x != nil {
+ return x.InitialFetchTimeout
+ }
+ return nil
+}
+
+func (x *ConfigSource) GetResourceApiVersion() ApiVersion {
+ if x != nil {
+ return x.ResourceApiVersion
+ }
+ return ApiVersion_AUTO
+}
+
+type isConfigSource_ConfigSourceSpecifier interface {
+ isConfigSource_ConfigSourceSpecifier()
+}
+
+type ConfigSource_Path struct {
+ // Deprecated in favor of “path_config_source“. Use that field instead.
+ //
+ // Deprecated: Do not use.
+ Path string `protobuf:"bytes,1,opt,name=path,proto3,oneof"`
+}
+
+type ConfigSource_PathConfigSource struct {
+ // Local filesystem path configuration source.
+ PathConfigSource *PathConfigSource `protobuf:"bytes,8,opt,name=path_config_source,json=pathConfigSource,proto3,oneof"`
+}
+
+type ConfigSource_ApiConfigSource struct {
+ // API configuration source.
+ ApiConfigSource *ApiConfigSource `protobuf:"bytes,2,opt,name=api_config_source,json=apiConfigSource,proto3,oneof"`
+}
+
+type ConfigSource_Ads struct {
+ // When set, ADS will be used to fetch resources. The ADS API configuration
+ // source in the bootstrap configuration is used.
+ Ads *AggregatedConfigSource `protobuf:"bytes,3,opt,name=ads,proto3,oneof"`
+}
+
+type ConfigSource_Self struct {
+ // [#not-implemented-hide:]
+ // When set, the client will access the resources from the same server it got the
+ // ConfigSource from, although not necessarily from the same stream. This is similar to the
+ // :ref:`ads` field, except that the client may use a
+ // different stream to the same server. As a result, this field can be used for things
+ // like LRS that cannot be sent on an ADS stream. It can also be used to link from (e.g.)
+ // LDS to RDS on the same server without requiring the management server to know its name
+ // or required credentials.
+ // [#next-major-version: In xDS v3, consider replacing the ads field with this one, since
+ // this field can implicitly mean to use the same stream in the case where the ConfigSource
+ // is provided via ADS and the specified data can also be obtained via ADS.]
+ Self *SelfConfigSource `protobuf:"bytes,5,opt,name=self,proto3,oneof"`
+}
+
+func (*ConfigSource_Path) isConfigSource_ConfigSourceSpecifier() {}
+
+func (*ConfigSource_PathConfigSource) isConfigSource_ConfigSourceSpecifier() {}
+
+func (*ConfigSource_ApiConfigSource) isConfigSource_ConfigSourceSpecifier() {}
+
+func (*ConfigSource_Ads) isConfigSource_ConfigSourceSpecifier() {}
+
+func (*ConfigSource_Self) isConfigSource_ConfigSourceSpecifier() {}
+
+// Configuration source specifier for a late-bound extension configuration. The
+// parent resource is warmed until all the initial extension configurations are
+// received, unless the flag to apply the default configuration is set.
+// Subsequent extension updates are atomic on a per-worker basis. Once an
+// extension configuration is applied to a request or a connection, it remains
+// constant for the duration of processing. If the initial delivery of the
+// extension configuration fails, due to a timeout for example, the optional
+// default configuration is applied. Without a default configuration, the
+// extension is disabled, until an extension configuration is received. The
+// behavior of a disabled extension depends on the context. For example, a
+// filter chain with a disabled extension filter rejects all incoming streams.
+type ExtensionConfigSource struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ConfigSource *ConfigSource `protobuf:"bytes,1,opt,name=config_source,json=configSource,proto3" json:"config_source,omitempty"`
+ // Optional default configuration to use as the initial configuration if
+ // there is a failure to receive the initial extension configuration or if
+ // “apply_default_config_without_warming“ flag is set.
+ DefaultConfig *anypb.Any `protobuf:"bytes,2,opt,name=default_config,json=defaultConfig,proto3" json:"default_config,omitempty"`
+ // Use the default config as the initial configuration without warming and
+ // waiting for the first discovery response. Requires the default configuration
+ // to be supplied.
+ ApplyDefaultConfigWithoutWarming bool `protobuf:"varint,3,opt,name=apply_default_config_without_warming,json=applyDefaultConfigWithoutWarming,proto3" json:"apply_default_config_without_warming,omitempty"`
+ // A set of permitted extension type URLs. Extension configuration updates are rejected
+ // if they do not match any type URL in the set.
+ TypeUrls []string `protobuf:"bytes,4,rep,name=type_urls,json=typeUrls,proto3" json:"type_urls,omitempty"`
+}
+
+func (x *ExtensionConfigSource) Reset() {
+ *x = ExtensionConfigSource{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ExtensionConfigSource) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExtensionConfigSource) ProtoMessage() {}
+
+func (x *ExtensionConfigSource) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_config_source_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExtensionConfigSource.ProtoReflect.Descriptor instead.
+func (*ExtensionConfigSource) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_config_source_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *ExtensionConfigSource) GetConfigSource() *ConfigSource {
+ if x != nil {
+ return x.ConfigSource
+ }
+ return nil
+}
+
+func (x *ExtensionConfigSource) GetDefaultConfig() *anypb.Any {
+ if x != nil {
+ return x.DefaultConfig
+ }
+ return nil
+}
+
+func (x *ExtensionConfigSource) GetApplyDefaultConfigWithoutWarming() bool {
+ if x != nil {
+ return x.ApplyDefaultConfigWithoutWarming
+ }
+ return false
+}
+
+func (x *ExtensionConfigSource) GetTypeUrls() []string {
+ if x != nil {
+ return x.TypeUrls
+ }
+ return nil
+}
+
+var File_envoy_config_core_v3_config_source_proto protoreflect.FileDescriptor
+
+var file_envoy_config_core_v3_config_source_proto_rawDesc = []byte{
+ 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33,
+ 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f,
+ 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
+ 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x67, 0x72,
+ 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61,
+ 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x78, 0x64, 0x73,
+ 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69,
+ 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f,
+ 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72,
+ 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75,
+ 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f,
+ 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64,
+ 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61,
+ 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf4, 0x06, 0x0a, 0x0f, 0x41, 0x70, 0x69,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x52, 0x0a, 0x08,
+ 0x61, 0x70, 0x69, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x41, 0x70, 0x69, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa,
+ 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x61, 0x70, 0x69, 0x54, 0x79, 0x70, 0x65,
+ 0x12, 0x5e, 0x0a, 0x15, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x61, 0x70,
+ 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32,
+ 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63,
+ 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x13, 0x74, 0x72, 0x61,
+ 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x41, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
+ 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
+ 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
+ 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x46, 0x0a, 0x0d, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65,
+ 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52,
+ 0x0c, 0x67, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x3e, 0x0a,
+ 0x0d, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x0c, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x4c, 0x0a,
+ 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x0e, 0x72, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x57, 0x0a, 0x13, 0x72,
+ 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e,
+ 0x67, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e,
+ 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
+ 0x73, 0x52, 0x11, 0x72, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x53, 0x65, 0x74, 0x74,
+ 0x69, 0x6e, 0x67, 0x73, 0x12, 0x41, 0x0a, 0x1e, 0x73, 0x65, 0x74, 0x5f, 0x6e, 0x6f, 0x64, 0x65,
+ 0x5f, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
+ 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x73, 0x65,
+ 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x4f, 0x6e, 0x46, 0x69, 0x72, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73,
+ 0x61, 0x67, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x57, 0x0a, 0x11, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x09, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45,
+ 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x10,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x73,
+ 0x22, 0x92, 0x01, 0x0a, 0x07, 0x41, 0x70, 0x69, 0x54, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, 0x25,
+ 0x44, 0x45, 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x55,
+ 0x4e, 0x41, 0x56, 0x41, 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x44, 0x4f, 0x5f, 0x4e, 0x4f,
+ 0x54, 0x5f, 0x55, 0x53, 0x45, 0x10, 0x00, 0x1a, 0x08, 0x08, 0x01, 0xa8, 0xf7, 0xb4, 0x8b, 0x02,
+ 0x01, 0x12, 0x08, 0x0a, 0x04, 0x52, 0x45, 0x53, 0x54, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x47,
+ 0x52, 0x50, 0x43, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x45, 0x4c, 0x54, 0x41, 0x5f, 0x47,
+ 0x52, 0x50, 0x43, 0x10, 0x03, 0x12, 0x13, 0x0a, 0x0f, 0x41, 0x47, 0x47, 0x52, 0x45, 0x47, 0x41,
+ 0x54, 0x45, 0x44, 0x5f, 0x47, 0x52, 0x50, 0x43, 0x10, 0x05, 0x12, 0x19, 0x0a, 0x15, 0x41, 0x47,
+ 0x47, 0x52, 0x45, 0x47, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x44, 0x45, 0x4c, 0x54, 0x41, 0x5f, 0x47,
+ 0x52, 0x50, 0x43, 0x10, 0x06, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e,
+ 0x41, 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22,
+ 0x49, 0x0a, 0x16, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a,
+ 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63,
+ 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0x9d, 0x01, 0x0a, 0x10, 0x53,
+ 0x65, 0x6c, 0x66, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12,
+ 0x5e, 0x0a, 0x15, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x61, 0x70, 0x69,
+ 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
+ 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x13, 0x74, 0x72, 0x61, 0x6e,
+ 0x73, 0x70, 0x6f, 0x72, 0x74, 0x41, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3a,
+ 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70,
+ 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x53, 0x65, 0x6c, 0x66, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xc7, 0x01, 0x0a, 0x11, 0x52,
+ 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
+ 0x12, 0x3b, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c,
+ 0x75, 0x65, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x12, 0x49, 0x0a,
+ 0x09, 0x66, 0x69, 0x6c, 0x6c, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0e,
+ 0xfa, 0x42, 0x0b, 0x12, 0x09, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x08,
+ 0x66, 0x69, 0x6c, 0x6c, 0x52, 0x61, 0x74, 0x65, 0x3a, 0x2a, 0x9a, 0xc5, 0x88, 0x1e, 0x25, 0x0a,
+ 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x53, 0x65, 0x74, 0x74,
+ 0x69, 0x6e, 0x67, 0x73, 0x22, 0x84, 0x01, 0x0a, 0x10, 0x50, 0x61, 0x74, 0x68, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x04, 0x70, 0x61, 0x74,
+ 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01,
+ 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x64, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64,
+ 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x10, 0x77, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x64, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x22, 0x8c, 0x05, 0x0a, 0x0c,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x38, 0x0a, 0x0b,
+ 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x16, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e,
+ 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x52, 0x0b, 0x61, 0x75, 0x74, 0x68, 0x6f,
+ 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e,
+ 0x30, 0x48, 0x00, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x56, 0x0a, 0x12, 0x70, 0x61, 0x74,
+ 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18,
+ 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x74,
+ 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x00, 0x52,
+ 0x10, 0x70, 0x61, 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x12, 0x53, 0x0a, 0x11, 0x61, 0x70, 0x69, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65,
+ 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0f, 0x61, 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x40, 0x0a, 0x03, 0x61, 0x64, 0x73, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65,
+ 0x67, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x48, 0x00, 0x52, 0x03, 0x61, 0x64, 0x73, 0x12, 0x3c, 0x0a, 0x04, 0x73, 0x65, 0x6c, 0x66,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65,
+ 0x6c, 0x66, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48, 0x00,
+ 0x52, 0x04, 0x73, 0x65, 0x6c, 0x66, 0x12, 0x4d, 0x0a, 0x15, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61,
+ 0x6c, 0x5f, 0x66, 0x65, 0x74, 0x63, 0x68, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x13, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x46, 0x65, 0x74, 0x63, 0x68, 0x54, 0x69,
+ 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x5c, 0x0a, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x5f, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20,
+ 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, 0x56, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52,
+ 0x12, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x1e, 0x0a, 0x17, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63,
+ 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x9e, 0x02, 0x0a, 0x15, 0x45,
+ 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x12, 0x51, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e,
+ 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42,
+ 0x08, 0xfa, 0x42, 0x05, 0xa2, 0x01, 0x02, 0x08, 0x01, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x3b, 0x0a, 0x0e, 0x64, 0x65, 0x66, 0x61, 0x75,
+ 0x6c, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4e, 0x0a, 0x24, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x5f, 0x64, 0x65,
+ 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x77, 0x69, 0x74,
+ 0x68, 0x6f, 0x75, 0x74, 0x5f, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x20, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x57, 0x69, 0x74, 0x68, 0x6f, 0x75, 0x74, 0x57, 0x61, 0x72,
+ 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x25, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c,
+ 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08,
+ 0x01, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x73, 0x2a, 0x40, 0x0a, 0x0a, 0x41,
+ 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x04, 0x41, 0x55, 0x54,
+ 0x4f, 0x10, 0x00, 0x1a, 0x0b, 0x08, 0x01, 0x8a, 0xf4, 0x9b, 0xb3, 0x05, 0x03, 0x33, 0x2e, 0x30,
+ 0x12, 0x13, 0x0a, 0x02, 0x56, 0x32, 0x10, 0x01, 0x1a, 0x0b, 0x08, 0x01, 0x8a, 0xf4, 0x9b, 0xb3,
+ 0x05, 0x03, 0x33, 0x2e, 0x30, 0x12, 0x06, 0x0a, 0x02, 0x56, 0x33, 0x10, 0x02, 0x42, 0x85, 0x01,
+ 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72,
+ 0x65, 0x2e, 0x76, 0x33, 0x42, 0x11, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75,
+ 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79,
+ 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e,
+ 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0xba, 0x80, 0xc8,
+ 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_config_core_v3_config_source_proto_rawDescOnce sync.Once
+ file_envoy_config_core_v3_config_source_proto_rawDescData = file_envoy_config_core_v3_config_source_proto_rawDesc
+)
+
+func file_envoy_config_core_v3_config_source_proto_rawDescGZIP() []byte {
+ file_envoy_config_core_v3_config_source_proto_rawDescOnce.Do(func() {
+ file_envoy_config_core_v3_config_source_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_config_source_proto_rawDescData)
+ })
+ return file_envoy_config_core_v3_config_source_proto_rawDescData
+}
+
+var file_envoy_config_core_v3_config_source_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_envoy_config_core_v3_config_source_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
+var file_envoy_config_core_v3_config_source_proto_goTypes = []interface{}{
+ (ApiVersion)(0), // 0: envoy.config.core.v3.ApiVersion
+ (ApiConfigSource_ApiType)(0), // 1: envoy.config.core.v3.ApiConfigSource.ApiType
+ (*ApiConfigSource)(nil), // 2: envoy.config.core.v3.ApiConfigSource
+ (*AggregatedConfigSource)(nil), // 3: envoy.config.core.v3.AggregatedConfigSource
+ (*SelfConfigSource)(nil), // 4: envoy.config.core.v3.SelfConfigSource
+ (*RateLimitSettings)(nil), // 5: envoy.config.core.v3.RateLimitSettings
+ (*PathConfigSource)(nil), // 6: envoy.config.core.v3.PathConfigSource
+ (*ConfigSource)(nil), // 7: envoy.config.core.v3.ConfigSource
+ (*ExtensionConfigSource)(nil), // 8: envoy.config.core.v3.ExtensionConfigSource
+ (*GrpcService)(nil), // 9: envoy.config.core.v3.GrpcService
+ (*durationpb.Duration)(nil), // 10: google.protobuf.Duration
+ (*TypedExtensionConfig)(nil), // 11: envoy.config.core.v3.TypedExtensionConfig
+ (*wrapperspb.UInt32Value)(nil), // 12: google.protobuf.UInt32Value
+ (*wrapperspb.DoubleValue)(nil), // 13: google.protobuf.DoubleValue
+ (*WatchedDirectory)(nil), // 14: envoy.config.core.v3.WatchedDirectory
+ (*v3.Authority)(nil), // 15: xds.core.v3.Authority
+ (*anypb.Any)(nil), // 16: google.protobuf.Any
+}
+var file_envoy_config_core_v3_config_source_proto_depIdxs = []int32{
+ 1, // 0: envoy.config.core.v3.ApiConfigSource.api_type:type_name -> envoy.config.core.v3.ApiConfigSource.ApiType
+ 0, // 1: envoy.config.core.v3.ApiConfigSource.transport_api_version:type_name -> envoy.config.core.v3.ApiVersion
+ 9, // 2: envoy.config.core.v3.ApiConfigSource.grpc_services:type_name -> envoy.config.core.v3.GrpcService
+ 10, // 3: envoy.config.core.v3.ApiConfigSource.refresh_delay:type_name -> google.protobuf.Duration
+ 10, // 4: envoy.config.core.v3.ApiConfigSource.request_timeout:type_name -> google.protobuf.Duration
+ 5, // 5: envoy.config.core.v3.ApiConfigSource.rate_limit_settings:type_name -> envoy.config.core.v3.RateLimitSettings
+ 11, // 6: envoy.config.core.v3.ApiConfigSource.config_validators:type_name -> envoy.config.core.v3.TypedExtensionConfig
+ 0, // 7: envoy.config.core.v3.SelfConfigSource.transport_api_version:type_name -> envoy.config.core.v3.ApiVersion
+ 12, // 8: envoy.config.core.v3.RateLimitSettings.max_tokens:type_name -> google.protobuf.UInt32Value
+ 13, // 9: envoy.config.core.v3.RateLimitSettings.fill_rate:type_name -> google.protobuf.DoubleValue
+ 14, // 10: envoy.config.core.v3.PathConfigSource.watched_directory:type_name -> envoy.config.core.v3.WatchedDirectory
+ 15, // 11: envoy.config.core.v3.ConfigSource.authorities:type_name -> xds.core.v3.Authority
+ 6, // 12: envoy.config.core.v3.ConfigSource.path_config_source:type_name -> envoy.config.core.v3.PathConfigSource
+ 2, // 13: envoy.config.core.v3.ConfigSource.api_config_source:type_name -> envoy.config.core.v3.ApiConfigSource
+ 3, // 14: envoy.config.core.v3.ConfigSource.ads:type_name -> envoy.config.core.v3.AggregatedConfigSource
+ 4, // 15: envoy.config.core.v3.ConfigSource.self:type_name -> envoy.config.core.v3.SelfConfigSource
+ 10, // 16: envoy.config.core.v3.ConfigSource.initial_fetch_timeout:type_name -> google.protobuf.Duration
+ 0, // 17: envoy.config.core.v3.ConfigSource.resource_api_version:type_name -> envoy.config.core.v3.ApiVersion
+ 7, // 18: envoy.config.core.v3.ExtensionConfigSource.config_source:type_name -> envoy.config.core.v3.ConfigSource
+ 16, // 19: envoy.config.core.v3.ExtensionConfigSource.default_config:type_name -> google.protobuf.Any
+ 20, // [20:20] is the sub-list for method output_type
+ 20, // [20:20] is the sub-list for method input_type
+ 20, // [20:20] is the sub-list for extension type_name
+ 20, // [20:20] is the sub-list for extension extendee
+ 0, // [0:20] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_core_v3_config_source_proto_init() }
+func file_envoy_config_core_v3_config_source_proto_init() {
+ if File_envoy_config_core_v3_config_source_proto != nil {
+ return
+ }
+ file_envoy_config_core_v3_base_proto_init()
+ file_envoy_config_core_v3_extension_proto_init()
+ file_envoy_config_core_v3_grpc_service_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_core_v3_config_source_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ApiConfigSource); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_config_source_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AggregatedConfigSource); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_config_source_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SelfConfigSource); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_config_source_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RateLimitSettings); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_config_source_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PathConfigSource); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_config_source_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ConfigSource); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_config_source_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ExtensionConfigSource); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_envoy_config_core_v3_config_source_proto_msgTypes[5].OneofWrappers = []interface{}{
+ (*ConfigSource_Path)(nil),
+ (*ConfigSource_PathConfigSource)(nil),
+ (*ConfigSource_ApiConfigSource)(nil),
+ (*ConfigSource_Ads)(nil),
+ (*ConfigSource_Self)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_core_v3_config_source_proto_rawDesc,
+ NumEnums: 2,
+ NumMessages: 7,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_core_v3_config_source_proto_goTypes,
+ DependencyIndexes: file_envoy_config_core_v3_config_source_proto_depIdxs,
+ EnumInfos: file_envoy_config_core_v3_config_source_proto_enumTypes,
+ MessageInfos: file_envoy_config_core_v3_config_source_proto_msgTypes,
+ }.Build()
+ File_envoy_config_core_v3_config_source_proto = out.File
+ file_envoy_config_core_v3_config_source_proto_rawDesc = nil
+ file_envoy_config_core_v3_config_source_proto_goTypes = nil
+ file_envoy_config_core_v3_config_source_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/config_source.pb.validate.go b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/config_source.pb.validate.go
new file mode 100644
index 000000000..edab15135
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/config_source.pb.validate.go
@@ -0,0 +1,1344 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/core/v3/config_source.proto
+
+package corev3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on ApiConfigSource with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *ApiConfigSource) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ApiConfigSource with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// ApiConfigSourceMultiError, or nil if none found.
+func (m *ApiConfigSource) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ApiConfigSource) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if _, ok := ApiConfigSource_ApiType_name[int32(m.GetApiType())]; !ok {
+ err := ApiConfigSourceValidationError{
+ field: "ApiType",
+ reason: "value must be one of the defined enum values",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if _, ok := ApiVersion_name[int32(m.GetTransportApiVersion())]; !ok {
+ err := ApiConfigSourceValidationError{
+ field: "TransportApiVersion",
+ reason: "value must be one of the defined enum values",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ for idx, item := range m.GetGrpcServices() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ApiConfigSourceValidationError{
+ field: fmt.Sprintf("GrpcServices[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ApiConfigSourceValidationError{
+ field: fmt.Sprintf("GrpcServices[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ApiConfigSourceValidationError{
+ field: fmt.Sprintf("GrpcServices[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if all {
+ switch v := interface{}(m.GetRefreshDelay()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ApiConfigSourceValidationError{
+ field: "RefreshDelay",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ApiConfigSourceValidationError{
+ field: "RefreshDelay",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetRefreshDelay()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ApiConfigSourceValidationError{
+ field: "RefreshDelay",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if d := m.GetRequestTimeout(); d != nil {
+ dur, err := d.AsDuration(), d.CheckValid()
+ if err != nil {
+ err = ApiConfigSourceValidationError{
+ field: "RequestTimeout",
+ reason: "value is not a valid duration",
+ cause: err,
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ } else {
+
+ gt := time.Duration(0*time.Second + 0*time.Nanosecond)
+
+ if dur <= gt {
+ err := ApiConfigSourceValidationError{
+ field: "RequestTimeout",
+ reason: "value must be greater than 0s",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetRateLimitSettings()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ApiConfigSourceValidationError{
+ field: "RateLimitSettings",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ApiConfigSourceValidationError{
+ field: "RateLimitSettings",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetRateLimitSettings()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ApiConfigSourceValidationError{
+ field: "RateLimitSettings",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for SetNodeOnFirstMessageOnly
+
+ for idx, item := range m.GetConfigValidators() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ApiConfigSourceValidationError{
+ field: fmt.Sprintf("ConfigValidators[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ApiConfigSourceValidationError{
+ field: fmt.Sprintf("ConfigValidators[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ApiConfigSourceValidationError{
+ field: fmt.Sprintf("ConfigValidators[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return ApiConfigSourceMultiError(errors)
+ }
+
+ return nil
+}
+
+// ApiConfigSourceMultiError is an error wrapping multiple validation errors
+// returned by ApiConfigSource.ValidateAll() if the designated constraints
+// aren't met.
+type ApiConfigSourceMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ApiConfigSourceMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ApiConfigSourceMultiError) AllErrors() []error { return m }
+
+// ApiConfigSourceValidationError is the validation error returned by
+// ApiConfigSource.Validate if the designated constraints aren't met.
+type ApiConfigSourceValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ApiConfigSourceValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ApiConfigSourceValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ApiConfigSourceValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ApiConfigSourceValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ApiConfigSourceValidationError) ErrorName() string { return "ApiConfigSourceValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ApiConfigSourceValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sApiConfigSource.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ApiConfigSourceValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ApiConfigSourceValidationError{}
+
+// Validate checks the field values on AggregatedConfigSource with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *AggregatedConfigSource) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on AggregatedConfigSource with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// AggregatedConfigSourceMultiError, or nil if none found.
+func (m *AggregatedConfigSource) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *AggregatedConfigSource) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(errors) > 0 {
+ return AggregatedConfigSourceMultiError(errors)
+ }
+
+ return nil
+}
+
+// AggregatedConfigSourceMultiError is an error wrapping multiple validation
+// errors returned by AggregatedConfigSource.ValidateAll() if the designated
+// constraints aren't met.
+type AggregatedConfigSourceMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m AggregatedConfigSourceMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m AggregatedConfigSourceMultiError) AllErrors() []error { return m }
+
+// AggregatedConfigSourceValidationError is the validation error returned by
+// AggregatedConfigSource.Validate if the designated constraints aren't met.
+type AggregatedConfigSourceValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e AggregatedConfigSourceValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e AggregatedConfigSourceValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e AggregatedConfigSourceValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e AggregatedConfigSourceValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e AggregatedConfigSourceValidationError) ErrorName() string {
+ return "AggregatedConfigSourceValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e AggregatedConfigSourceValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sAggregatedConfigSource.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = AggregatedConfigSourceValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = AggregatedConfigSourceValidationError{}
+
+// Validate checks the field values on SelfConfigSource with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *SelfConfigSource) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on SelfConfigSource with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// SelfConfigSourceMultiError, or nil if none found.
+func (m *SelfConfigSource) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *SelfConfigSource) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if _, ok := ApiVersion_name[int32(m.GetTransportApiVersion())]; !ok {
+ err := SelfConfigSourceValidationError{
+ field: "TransportApiVersion",
+ reason: "value must be one of the defined enum values",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return SelfConfigSourceMultiError(errors)
+ }
+
+ return nil
+}
+
+// SelfConfigSourceMultiError is an error wrapping multiple validation errors
+// returned by SelfConfigSource.ValidateAll() if the designated constraints
+// aren't met.
+type SelfConfigSourceMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m SelfConfigSourceMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m SelfConfigSourceMultiError) AllErrors() []error { return m }
+
+// SelfConfigSourceValidationError is the validation error returned by
+// SelfConfigSource.Validate if the designated constraints aren't met.
+type SelfConfigSourceValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e SelfConfigSourceValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e SelfConfigSourceValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e SelfConfigSourceValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e SelfConfigSourceValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e SelfConfigSourceValidationError) ErrorName() string { return "SelfConfigSourceValidationError" }
+
+// Error satisfies the builtin error interface
+func (e SelfConfigSourceValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sSelfConfigSource.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = SelfConfigSourceValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = SelfConfigSourceValidationError{}
+
+// Validate checks the field values on RateLimitSettings with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *RateLimitSettings) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on RateLimitSettings with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// RateLimitSettingsMultiError, or nil if none found.
+func (m *RateLimitSettings) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *RateLimitSettings) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetMaxTokens()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RateLimitSettingsValidationError{
+ field: "MaxTokens",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RateLimitSettingsValidationError{
+ field: "MaxTokens",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMaxTokens()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RateLimitSettingsValidationError{
+ field: "MaxTokens",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if wrapper := m.GetFillRate(); wrapper != nil {
+
+ if wrapper.GetValue() <= 0 {
+ err := RateLimitSettingsValidationError{
+ field: "FillRate",
+ reason: "value must be greater than 0",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return RateLimitSettingsMultiError(errors)
+ }
+
+ return nil
+}
+
+// RateLimitSettingsMultiError is an error wrapping multiple validation errors
+// returned by RateLimitSettings.ValidateAll() if the designated constraints
+// aren't met.
+type RateLimitSettingsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m RateLimitSettingsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m RateLimitSettingsMultiError) AllErrors() []error { return m }
+
+// RateLimitSettingsValidationError is the validation error returned by
+// RateLimitSettings.Validate if the designated constraints aren't met.
+type RateLimitSettingsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RateLimitSettingsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RateLimitSettingsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RateLimitSettingsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RateLimitSettingsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RateLimitSettingsValidationError) ErrorName() string {
+ return "RateLimitSettingsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e RateLimitSettingsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRateLimitSettings.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RateLimitSettingsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RateLimitSettingsValidationError{}
+
+// Validate checks the field values on PathConfigSource with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *PathConfigSource) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on PathConfigSource with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// PathConfigSourceMultiError, or nil if none found.
+func (m *PathConfigSource) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *PathConfigSource) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if utf8.RuneCountInString(m.GetPath()) < 1 {
+ err := PathConfigSourceValidationError{
+ field: "Path",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetWatchedDirectory()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, PathConfigSourceValidationError{
+ field: "WatchedDirectory",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, PathConfigSourceValidationError{
+ field: "WatchedDirectory",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetWatchedDirectory()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return PathConfigSourceValidationError{
+ field: "WatchedDirectory",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return PathConfigSourceMultiError(errors)
+ }
+
+ return nil
+}
+
+// PathConfigSourceMultiError is an error wrapping multiple validation errors
+// returned by PathConfigSource.ValidateAll() if the designated constraints
+// aren't met.
+type PathConfigSourceMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m PathConfigSourceMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m PathConfigSourceMultiError) AllErrors() []error { return m }
+
+// PathConfigSourceValidationError is the validation error returned by
+// PathConfigSource.Validate if the designated constraints aren't met.
+type PathConfigSourceValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e PathConfigSourceValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e PathConfigSourceValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e PathConfigSourceValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e PathConfigSourceValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e PathConfigSourceValidationError) ErrorName() string { return "PathConfigSourceValidationError" }
+
+// Error satisfies the builtin error interface
+func (e PathConfigSourceValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sPathConfigSource.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = PathConfigSourceValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = PathConfigSourceValidationError{}
+
+// Validate checks the field values on ConfigSource with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *ConfigSource) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ConfigSource with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in ConfigSourceMultiError, or
+// nil if none found.
+func (m *ConfigSource) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ConfigSource) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetAuthorities() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ConfigSourceValidationError{
+ field: fmt.Sprintf("Authorities[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ConfigSourceValidationError{
+ field: fmt.Sprintf("Authorities[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ConfigSourceValidationError{
+ field: fmt.Sprintf("Authorities[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if all {
+ switch v := interface{}(m.GetInitialFetchTimeout()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ConfigSourceValidationError{
+ field: "InitialFetchTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ConfigSourceValidationError{
+ field: "InitialFetchTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetInitialFetchTimeout()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ConfigSourceValidationError{
+ field: "InitialFetchTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if _, ok := ApiVersion_name[int32(m.GetResourceApiVersion())]; !ok {
+ err := ConfigSourceValidationError{
+ field: "ResourceApiVersion",
+ reason: "value must be one of the defined enum values",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ oneofConfigSourceSpecifierPresent := false
+ switch v := m.ConfigSourceSpecifier.(type) {
+ case *ConfigSource_Path:
+ if v == nil {
+ err := ConfigSourceValidationError{
+ field: "ConfigSourceSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofConfigSourceSpecifierPresent = true
+ // no validation rules for Path
+ case *ConfigSource_PathConfigSource:
+ if v == nil {
+ err := ConfigSourceValidationError{
+ field: "ConfigSourceSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofConfigSourceSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetPathConfigSource()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ConfigSourceValidationError{
+ field: "PathConfigSource",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ConfigSourceValidationError{
+ field: "PathConfigSource",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetPathConfigSource()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ConfigSourceValidationError{
+ field: "PathConfigSource",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *ConfigSource_ApiConfigSource:
+ if v == nil {
+ err := ConfigSourceValidationError{
+ field: "ConfigSourceSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofConfigSourceSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetApiConfigSource()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ConfigSourceValidationError{
+ field: "ApiConfigSource",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ConfigSourceValidationError{
+ field: "ApiConfigSource",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetApiConfigSource()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ConfigSourceValidationError{
+ field: "ApiConfigSource",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *ConfigSource_Ads:
+ if v == nil {
+ err := ConfigSourceValidationError{
+ field: "ConfigSourceSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofConfigSourceSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetAds()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ConfigSourceValidationError{
+ field: "Ads",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ConfigSourceValidationError{
+ field: "Ads",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetAds()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ConfigSourceValidationError{
+ field: "Ads",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *ConfigSource_Self:
+ if v == nil {
+ err := ConfigSourceValidationError{
+ field: "ConfigSourceSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofConfigSourceSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetSelf()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ConfigSourceValidationError{
+ field: "Self",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ConfigSourceValidationError{
+ field: "Self",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetSelf()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ConfigSourceValidationError{
+ field: "Self",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofConfigSourceSpecifierPresent {
+ err := ConfigSourceValidationError{
+ field: "ConfigSourceSpecifier",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return ConfigSourceMultiError(errors)
+ }
+
+ return nil
+}
+
+// ConfigSourceMultiError is an error wrapping multiple validation errors
+// returned by ConfigSource.ValidateAll() if the designated constraints aren't met.
+type ConfigSourceMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ConfigSourceMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ConfigSourceMultiError) AllErrors() []error { return m }
+
+// ConfigSourceValidationError is the validation error returned by
+// ConfigSource.Validate if the designated constraints aren't met.
+type ConfigSourceValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ConfigSourceValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ConfigSourceValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ConfigSourceValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ConfigSourceValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ConfigSourceValidationError) ErrorName() string { return "ConfigSourceValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ConfigSourceValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sConfigSource.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ConfigSourceValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ConfigSourceValidationError{}
+
+// Validate checks the field values on ExtensionConfigSource with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *ExtensionConfigSource) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ExtensionConfigSource with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// ExtensionConfigSourceMultiError, or nil if none found.
+func (m *ExtensionConfigSource) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ExtensionConfigSource) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if m.GetConfigSource() == nil {
+ err := ExtensionConfigSourceValidationError{
+ field: "ConfigSource",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if a := m.GetConfigSource(); a != nil {
+
+ }
+
+ if all {
+ switch v := interface{}(m.GetDefaultConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ExtensionConfigSourceValidationError{
+ field: "DefaultConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ExtensionConfigSourceValidationError{
+ field: "DefaultConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetDefaultConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ExtensionConfigSourceValidationError{
+ field: "DefaultConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for ApplyDefaultConfigWithoutWarming
+
+ if len(m.GetTypeUrls()) < 1 {
+ err := ExtensionConfigSourceValidationError{
+ field: "TypeUrls",
+ reason: "value must contain at least 1 item(s)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return ExtensionConfigSourceMultiError(errors)
+ }
+
+ return nil
+}
+
+// ExtensionConfigSourceMultiError is an error wrapping multiple validation
+// errors returned by ExtensionConfigSource.ValidateAll() if the designated
+// constraints aren't met.
+type ExtensionConfigSourceMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ExtensionConfigSourceMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ExtensionConfigSourceMultiError) AllErrors() []error { return m }
+
+// ExtensionConfigSourceValidationError is the validation error returned by
+// ExtensionConfigSource.Validate if the designated constraints aren't met.
+type ExtensionConfigSourceValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ExtensionConfigSourceValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ExtensionConfigSourceValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ExtensionConfigSourceValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ExtensionConfigSourceValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ExtensionConfigSourceValidationError) ErrorName() string {
+ return "ExtensionConfigSourceValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ExtensionConfigSourceValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sExtensionConfigSource.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ExtensionConfigSourceValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ExtensionConfigSourceValidationError{}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/event_service_config.pb.go b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/event_service_config.pb.go
new file mode 100644
index 000000000..c293c92b1
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/event_service_config.pb.go
@@ -0,0 +1,205 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.23.0
+// protoc v4.23.1
+// source: envoy/config/core/v3/event_service_config.proto
+
+package corev3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// [#not-implemented-hide:]
+// Configuration of the event reporting service endpoint.
+type EventServiceConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to ConfigSourceSpecifier:
+ //
+ // *EventServiceConfig_GrpcService
+ ConfigSourceSpecifier isEventServiceConfig_ConfigSourceSpecifier `protobuf_oneof:"config_source_specifier"`
+}
+
+func (x *EventServiceConfig) Reset() {
+ *x = EventServiceConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_event_service_config_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EventServiceConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EventServiceConfig) ProtoMessage() {}
+
+func (x *EventServiceConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_event_service_config_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EventServiceConfig.ProtoReflect.Descriptor instead.
+func (*EventServiceConfig) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_event_service_config_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *EventServiceConfig) GetConfigSourceSpecifier() isEventServiceConfig_ConfigSourceSpecifier {
+ if m != nil {
+ return m.ConfigSourceSpecifier
+ }
+ return nil
+}
+
+func (x *EventServiceConfig) GetGrpcService() *GrpcService {
+ if x, ok := x.GetConfigSourceSpecifier().(*EventServiceConfig_GrpcService); ok {
+ return x.GrpcService
+ }
+ return nil
+}
+
+type isEventServiceConfig_ConfigSourceSpecifier interface {
+ isEventServiceConfig_ConfigSourceSpecifier()
+}
+
+type EventServiceConfig_GrpcService struct {
+ // Specifies the gRPC service that hosts the event reporting service.
+ GrpcService *GrpcService `protobuf:"bytes,1,opt,name=grpc_service,json=grpcService,proto3,oneof"`
+}
+
+func (*EventServiceConfig_GrpcService) isEventServiceConfig_ConfigSourceSpecifier() {}
+
+var File_envoy_config_core_v3_event_service_config_proto protoreflect.FileDescriptor
+
+var file_envoy_config_core_v3_event_service_config_proto_rawDesc = []byte{
+ 0x0a, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
+ 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x67, 0x72,
+ 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c,
+ 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa9, 0x01, 0x0a, 0x12,
+ 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x12, 0x46, 0x0a, 0x0c, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e,
+ 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x67,
+ 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e,
+ 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e,
+ 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x1e, 0x0a, 0x17, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69,
+ 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x8b, 0x01, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x17,
+ 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75,
+ 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79,
+ 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e,
+ 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0xba, 0x80, 0xc8,
+ 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_config_core_v3_event_service_config_proto_rawDescOnce sync.Once
+ file_envoy_config_core_v3_event_service_config_proto_rawDescData = file_envoy_config_core_v3_event_service_config_proto_rawDesc
+)
+
+func file_envoy_config_core_v3_event_service_config_proto_rawDescGZIP() []byte {
+ file_envoy_config_core_v3_event_service_config_proto_rawDescOnce.Do(func() {
+ file_envoy_config_core_v3_event_service_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_event_service_config_proto_rawDescData)
+ })
+ return file_envoy_config_core_v3_event_service_config_proto_rawDescData
+}
+
+var file_envoy_config_core_v3_event_service_config_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_envoy_config_core_v3_event_service_config_proto_goTypes = []interface{}{
+ (*EventServiceConfig)(nil), // 0: envoy.config.core.v3.EventServiceConfig
+ (*GrpcService)(nil), // 1: envoy.config.core.v3.GrpcService
+}
+var file_envoy_config_core_v3_event_service_config_proto_depIdxs = []int32{
+ 1, // 0: envoy.config.core.v3.EventServiceConfig.grpc_service:type_name -> envoy.config.core.v3.GrpcService
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_core_v3_event_service_config_proto_init() }
+func file_envoy_config_core_v3_event_service_config_proto_init() {
+ if File_envoy_config_core_v3_event_service_config_proto != nil {
+ return
+ }
+ file_envoy_config_core_v3_grpc_service_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_core_v3_event_service_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EventServiceConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_envoy_config_core_v3_event_service_config_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*EventServiceConfig_GrpcService)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_core_v3_event_service_config_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_core_v3_event_service_config_proto_goTypes,
+ DependencyIndexes: file_envoy_config_core_v3_event_service_config_proto_depIdxs,
+ MessageInfos: file_envoy_config_core_v3_event_service_config_proto_msgTypes,
+ }.Build()
+ File_envoy_config_core_v3_event_service_config_proto = out.File
+ file_envoy_config_core_v3_event_service_config_proto_rawDesc = nil
+ file_envoy_config_core_v3_event_service_config_proto_goTypes = nil
+ file_envoy_config_core_v3_event_service_config_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/event_service_config.pb.validate.go b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/event_service_config.pb.validate.go
new file mode 100644
index 000000000..a55672c27
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/event_service_config.pb.validate.go
@@ -0,0 +1,196 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/core/v3/event_service_config.proto
+
+package corev3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on EventServiceConfig with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *EventServiceConfig) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on EventServiceConfig with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// EventServiceConfigMultiError, or nil if none found.
+func (m *EventServiceConfig) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *EventServiceConfig) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ oneofConfigSourceSpecifierPresent := false
+ switch v := m.ConfigSourceSpecifier.(type) {
+ case *EventServiceConfig_GrpcService:
+ if v == nil {
+ err := EventServiceConfigValidationError{
+ field: "ConfigSourceSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofConfigSourceSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetGrpcService()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, EventServiceConfigValidationError{
+ field: "GrpcService",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, EventServiceConfigValidationError{
+ field: "GrpcService",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetGrpcService()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return EventServiceConfigValidationError{
+ field: "GrpcService",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofConfigSourceSpecifierPresent {
+ err := EventServiceConfigValidationError{
+ field: "ConfigSourceSpecifier",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return EventServiceConfigMultiError(errors)
+ }
+
+ return nil
+}
+
+// EventServiceConfigMultiError is an error wrapping multiple validation errors
+// returned by EventServiceConfig.ValidateAll() if the designated constraints
+// aren't met.
+type EventServiceConfigMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m EventServiceConfigMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m EventServiceConfigMultiError) AllErrors() []error { return m }
+
+// EventServiceConfigValidationError is the validation error returned by
+// EventServiceConfig.Validate if the designated constraints aren't met.
+type EventServiceConfigValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e EventServiceConfigValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e EventServiceConfigValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e EventServiceConfigValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e EventServiceConfigValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e EventServiceConfigValidationError) ErrorName() string {
+ return "EventServiceConfigValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e EventServiceConfigValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sEventServiceConfig.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = EventServiceConfigValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = EventServiceConfigValidationError{}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/extension.pb.go b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/extension.pb.go
new file mode 100644
index 000000000..bb691cdd5
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/extension.pb.go
@@ -0,0 +1,190 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.23.0
+// protoc v4.23.1
+// source: envoy/config/core/v3/extension.proto
+
+package corev3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// Message type for extension configuration.
+// [#next-major-version: revisit all existing typed_config that doesn't use this wrapper.].
+type TypedExtensionConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name of an extension. This is not used to select the extension, instead
+ // it serves the role of an opaque identifier.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // The typed config for the extension. The type URL will be used to identify
+ // the extension. In the case that the type URL is “xds.type.v3.TypedStruct“
+ // (or, for historical reasons, “udpa.type.v1.TypedStruct“), the inner type
+ // URL of “TypedStruct“ will be utilized. See the
+ // :ref:`extension configuration overview
+ // ` for further details.
+ TypedConfig *anypb.Any `protobuf:"bytes,2,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"`
+}
+
+func (x *TypedExtensionConfig) Reset() {
+ *x = TypedExtensionConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_extension_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TypedExtensionConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TypedExtensionConfig) ProtoMessage() {}
+
+func (x *TypedExtensionConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_extension_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TypedExtensionConfig.ProtoReflect.Descriptor instead.
+func (*TypedExtensionConfig) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_extension_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *TypedExtensionConfig) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *TypedExtensionConfig) GetTypedConfig() *anypb.Any {
+ if x != nil {
+ return x.TypedConfig
+ }
+ return nil
+}
+
+var File_envoy_config_core_v3_extension_proto protoreflect.FileDescriptor
+
+var file_envoy_config_core_v3_extension_proto_rawDesc = []byte{
+ 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e,
+ 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
+ 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22,
+ 0x76, 0x0a, 0x14, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
+ 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x41, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79,
+ 0x42, 0x08, 0xfa, 0x42, 0x05, 0xa2, 0x01, 0x02, 0x08, 0x01, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65,
+ 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x82, 0x01, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0e,
+ 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
+ 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f,
+ 0x72, 0x65, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_config_core_v3_extension_proto_rawDescOnce sync.Once
+ file_envoy_config_core_v3_extension_proto_rawDescData = file_envoy_config_core_v3_extension_proto_rawDesc
+)
+
+func file_envoy_config_core_v3_extension_proto_rawDescGZIP() []byte {
+ file_envoy_config_core_v3_extension_proto_rawDescOnce.Do(func() {
+ file_envoy_config_core_v3_extension_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_extension_proto_rawDescData)
+ })
+ return file_envoy_config_core_v3_extension_proto_rawDescData
+}
+
+var file_envoy_config_core_v3_extension_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_envoy_config_core_v3_extension_proto_goTypes = []interface{}{
+ (*TypedExtensionConfig)(nil), // 0: envoy.config.core.v3.TypedExtensionConfig
+ (*anypb.Any)(nil), // 1: google.protobuf.Any
+}
+var file_envoy_config_core_v3_extension_proto_depIdxs = []int32{
+ 1, // 0: envoy.config.core.v3.TypedExtensionConfig.typed_config:type_name -> google.protobuf.Any
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_core_v3_extension_proto_init() }
+func file_envoy_config_core_v3_extension_proto_init() {
+ if File_envoy_config_core_v3_extension_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_core_v3_extension_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TypedExtensionConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_core_v3_extension_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_core_v3_extension_proto_goTypes,
+ DependencyIndexes: file_envoy_config_core_v3_extension_proto_depIdxs,
+ MessageInfos: file_envoy_config_core_v3_extension_proto_msgTypes,
+ }.Build()
+ File_envoy_config_core_v3_extension_proto = out.File
+ file_envoy_config_core_v3_extension_proto_rawDesc = nil
+ file_envoy_config_core_v3_extension_proto_goTypes = nil
+ file_envoy_config_core_v3_extension_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/extension.pb.validate.go b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/extension.pb.validate.go
new file mode 100644
index 000000000..9c915d383
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/extension.pb.validate.go
@@ -0,0 +1,164 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/core/v3/extension.proto
+
+package corev3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on TypedExtensionConfig with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *TypedExtensionConfig) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on TypedExtensionConfig with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// TypedExtensionConfigMultiError, or nil if none found.
+func (m *TypedExtensionConfig) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *TypedExtensionConfig) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if utf8.RuneCountInString(m.GetName()) < 1 {
+ err := TypedExtensionConfigValidationError{
+ field: "Name",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if m.GetTypedConfig() == nil {
+ err := TypedExtensionConfigValidationError{
+ field: "TypedConfig",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if a := m.GetTypedConfig(); a != nil {
+
+ }
+
+ if len(errors) > 0 {
+ return TypedExtensionConfigMultiError(errors)
+ }
+
+ return nil
+}
+
+// TypedExtensionConfigMultiError is an error wrapping multiple validation
+// errors returned by TypedExtensionConfig.ValidateAll() if the designated
+// constraints aren't met.
+type TypedExtensionConfigMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m TypedExtensionConfigMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m TypedExtensionConfigMultiError) AllErrors() []error { return m }
+
+// TypedExtensionConfigValidationError is the validation error returned by
+// TypedExtensionConfig.Validate if the designated constraints aren't met.
+type TypedExtensionConfigValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e TypedExtensionConfigValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e TypedExtensionConfigValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e TypedExtensionConfigValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e TypedExtensionConfigValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e TypedExtensionConfigValidationError) ErrorName() string {
+ return "TypedExtensionConfigValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e TypedExtensionConfigValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sTypedExtensionConfig.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = TypedExtensionConfigValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = TypedExtensionConfigValidationError{}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/grpc_method_list.pb.go b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/grpc_method_list.pb.go
new file mode 100644
index 000000000..8fc3cdb75
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/grpc_method_list.pb.go
@@ -0,0 +1,251 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.23.0
+// protoc v4.23.1
+// source: envoy/config/core/v3/grpc_method_list.proto
+
+package corev3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// A list of gRPC methods which can be used as an allowlist, for example.
+type GrpcMethodList struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Services []*GrpcMethodList_Service `protobuf:"bytes,1,rep,name=services,proto3" json:"services,omitempty"`
+}
+
+func (x *GrpcMethodList) Reset() {
+ *x = GrpcMethodList{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_grpc_method_list_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GrpcMethodList) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcMethodList) ProtoMessage() {}
+
+func (x *GrpcMethodList) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_grpc_method_list_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcMethodList.ProtoReflect.Descriptor instead.
+func (*GrpcMethodList) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_grpc_method_list_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *GrpcMethodList) GetServices() []*GrpcMethodList_Service {
+ if x != nil {
+ return x.Services
+ }
+ return nil
+}
+
+type GrpcMethodList_Service struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name of the gRPC service.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // The names of the gRPC methods in this service.
+ MethodNames []string `protobuf:"bytes,2,rep,name=method_names,json=methodNames,proto3" json:"method_names,omitempty"`
+}
+
+func (x *GrpcMethodList_Service) Reset() {
+ *x = GrpcMethodList_Service{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_grpc_method_list_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GrpcMethodList_Service) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcMethodList_Service) ProtoMessage() {}
+
+func (x *GrpcMethodList_Service) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_grpc_method_list_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcMethodList_Service.ProtoReflect.Descriptor instead.
+func (*GrpcMethodList_Service) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_grpc_method_list_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *GrpcMethodList_Service) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *GrpcMethodList_Service) GetMethodNames() []string {
+ if x != nil {
+ return x.MethodNames
+ }
+ return nil
+}
+
+var File_envoy_config_core_v3_grpc_method_list_proto protoreflect.FileDescriptor
+
+var file_envoy_config_core_v3_grpc_method_list_proto_rawDesc = []byte{
+ 0x0a, 0x2b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6d, 0x65, 0x74, 0x68,
+ 0x6f, 0x64, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65,
+ 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f,
+ 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8a,
+ 0x02, 0x0a, 0x0e, 0x47, 0x72, 0x70, 0x63, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4c, 0x69, 0x73,
+ 0x74, 0x12, 0x48, 0x0a, 0x08, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4d,
+ 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x52, 0x08, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x1a, 0x84, 0x01, 0x0a, 0x07,
+ 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x0c, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x6e,
+ 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92,
+ 0x01, 0x02, 0x08, 0x01, 0x52, 0x0b, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65,
+ 0x73, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63,
+ 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70,
+ 0x63, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x87, 0x01, 0x0a, 0x22,
+ 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e,
+ 0x76, 0x33, 0x42, 0x13, 0x47, 0x72, 0x70, 0x63, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4c, 0x69,
+ 0x73, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75,
+ 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79,
+ 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e,
+ 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0xba, 0x80, 0xc8,
+ 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_config_core_v3_grpc_method_list_proto_rawDescOnce sync.Once
+ file_envoy_config_core_v3_grpc_method_list_proto_rawDescData = file_envoy_config_core_v3_grpc_method_list_proto_rawDesc
+)
+
+func file_envoy_config_core_v3_grpc_method_list_proto_rawDescGZIP() []byte {
+ file_envoy_config_core_v3_grpc_method_list_proto_rawDescOnce.Do(func() {
+ file_envoy_config_core_v3_grpc_method_list_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_grpc_method_list_proto_rawDescData)
+ })
+ return file_envoy_config_core_v3_grpc_method_list_proto_rawDescData
+}
+
+var file_envoy_config_core_v3_grpc_method_list_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_envoy_config_core_v3_grpc_method_list_proto_goTypes = []interface{}{
+ (*GrpcMethodList)(nil), // 0: envoy.config.core.v3.GrpcMethodList
+ (*GrpcMethodList_Service)(nil), // 1: envoy.config.core.v3.GrpcMethodList.Service
+}
+var file_envoy_config_core_v3_grpc_method_list_proto_depIdxs = []int32{
+ 1, // 0: envoy.config.core.v3.GrpcMethodList.services:type_name -> envoy.config.core.v3.GrpcMethodList.Service
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_core_v3_grpc_method_list_proto_init() }
+func file_envoy_config_core_v3_grpc_method_list_proto_init() {
+ if File_envoy_config_core_v3_grpc_method_list_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_core_v3_grpc_method_list_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GrpcMethodList); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_grpc_method_list_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GrpcMethodList_Service); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_core_v3_grpc_method_list_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_core_v3_grpc_method_list_proto_goTypes,
+ DependencyIndexes: file_envoy_config_core_v3_grpc_method_list_proto_depIdxs,
+ MessageInfos: file_envoy_config_core_v3_grpc_method_list_proto_msgTypes,
+ }.Build()
+ File_envoy_config_core_v3_grpc_method_list_proto = out.File
+ file_envoy_config_core_v3_grpc_method_list_proto_rawDesc = nil
+ file_envoy_config_core_v3_grpc_method_list_proto_goTypes = nil
+ file_envoy_config_core_v3_grpc_method_list_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/grpc_method_list.pb.validate.go b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/grpc_method_list.pb.validate.go
new file mode 100644
index 000000000..4fc134e94
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/grpc_method_list.pb.validate.go
@@ -0,0 +1,294 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/core/v3/grpc_method_list.proto
+
+package corev3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on GrpcMethodList with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *GrpcMethodList) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on GrpcMethodList with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in GrpcMethodListMultiError,
+// or nil if none found.
+func (m *GrpcMethodList) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *GrpcMethodList) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetServices() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, GrpcMethodListValidationError{
+ field: fmt.Sprintf("Services[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, GrpcMethodListValidationError{
+ field: fmt.Sprintf("Services[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcMethodListValidationError{
+ field: fmt.Sprintf("Services[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return GrpcMethodListMultiError(errors)
+ }
+
+ return nil
+}
+
+// GrpcMethodListMultiError is an error wrapping multiple validation errors
+// returned by GrpcMethodList.ValidateAll() if the designated constraints
+// aren't met.
+type GrpcMethodListMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m GrpcMethodListMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m GrpcMethodListMultiError) AllErrors() []error { return m }
+
+// GrpcMethodListValidationError is the validation error returned by
+// GrpcMethodList.Validate if the designated constraints aren't met.
+type GrpcMethodListValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e GrpcMethodListValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e GrpcMethodListValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e GrpcMethodListValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e GrpcMethodListValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e GrpcMethodListValidationError) ErrorName() string { return "GrpcMethodListValidationError" }
+
+// Error satisfies the builtin error interface
+func (e GrpcMethodListValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sGrpcMethodList.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = GrpcMethodListValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = GrpcMethodListValidationError{}
+
+// Validate checks the field values on GrpcMethodList_Service with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *GrpcMethodList_Service) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on GrpcMethodList_Service with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// GrpcMethodList_ServiceMultiError, or nil if none found.
+func (m *GrpcMethodList_Service) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *GrpcMethodList_Service) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if utf8.RuneCountInString(m.GetName()) < 1 {
+ err := GrpcMethodList_ServiceValidationError{
+ field: "Name",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(m.GetMethodNames()) < 1 {
+ err := GrpcMethodList_ServiceValidationError{
+ field: "MethodNames",
+ reason: "value must contain at least 1 item(s)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return GrpcMethodList_ServiceMultiError(errors)
+ }
+
+ return nil
+}
+
+// GrpcMethodList_ServiceMultiError is an error wrapping multiple validation
+// errors returned by GrpcMethodList_Service.ValidateAll() if the designated
+// constraints aren't met.
+type GrpcMethodList_ServiceMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m GrpcMethodList_ServiceMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m GrpcMethodList_ServiceMultiError) AllErrors() []error { return m }
+
+// GrpcMethodList_ServiceValidationError is the validation error returned by
+// GrpcMethodList_Service.Validate if the designated constraints aren't met.
+type GrpcMethodList_ServiceValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e GrpcMethodList_ServiceValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e GrpcMethodList_ServiceValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e GrpcMethodList_ServiceValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e GrpcMethodList_ServiceValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e GrpcMethodList_ServiceValidationError) ErrorName() string {
+ return "GrpcMethodList_ServiceValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e GrpcMethodList_ServiceValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sGrpcMethodList_Service.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = GrpcMethodList_ServiceValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = GrpcMethodList_ServiceValidationError{}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/grpc_service.pb.go b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/grpc_service.pb.go
new file mode 100644
index 000000000..47195db14
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/grpc_service.pb.go
@@ -0,0 +1,1769 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.23.0
+// protoc v4.23.1
+// source: envoy/config/core/v3/grpc_service.proto
+
+package corev3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// gRPC service configuration. This is used by :ref:`ApiConfigSource
+// ` and filter configurations.
+// [#next-free-field: 6]
+type GrpcService struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to TargetSpecifier:
+ //
+ // *GrpcService_EnvoyGrpc_
+ // *GrpcService_GoogleGrpc_
+ TargetSpecifier isGrpcService_TargetSpecifier `protobuf_oneof:"target_specifier"`
+ // The timeout for the gRPC request. This is the timeout for a specific
+ // request.
+ Timeout *durationpb.Duration `protobuf:"bytes,3,opt,name=timeout,proto3" json:"timeout,omitempty"`
+ // Additional metadata to include in streams initiated to the GrpcService. This can be used for
+ // scenarios in which additional ad hoc authorization headers (e.g. “x-foo-bar: baz-key“) are to
+ // be injected. For more information, including details on header value syntax, see the
+ // documentation on :ref:`custom request headers
+ // `.
+ InitialMetadata []*HeaderValue `protobuf:"bytes,5,rep,name=initial_metadata,json=initialMetadata,proto3" json:"initial_metadata,omitempty"`
+}
+
+func (x *GrpcService) Reset() {
+ *x = GrpcService{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GrpcService) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcService) ProtoMessage() {}
+
+func (x *GrpcService) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcService.ProtoReflect.Descriptor instead.
+func (*GrpcService) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *GrpcService) GetTargetSpecifier() isGrpcService_TargetSpecifier {
+ if m != nil {
+ return m.TargetSpecifier
+ }
+ return nil
+}
+
+func (x *GrpcService) GetEnvoyGrpc() *GrpcService_EnvoyGrpc {
+ if x, ok := x.GetTargetSpecifier().(*GrpcService_EnvoyGrpc_); ok {
+ return x.EnvoyGrpc
+ }
+ return nil
+}
+
+func (x *GrpcService) GetGoogleGrpc() *GrpcService_GoogleGrpc {
+ if x, ok := x.GetTargetSpecifier().(*GrpcService_GoogleGrpc_); ok {
+ return x.GoogleGrpc
+ }
+ return nil
+}
+
+func (x *GrpcService) GetTimeout() *durationpb.Duration {
+ if x != nil {
+ return x.Timeout
+ }
+ return nil
+}
+
+func (x *GrpcService) GetInitialMetadata() []*HeaderValue {
+ if x != nil {
+ return x.InitialMetadata
+ }
+ return nil
+}
+
+type isGrpcService_TargetSpecifier interface {
+ isGrpcService_TargetSpecifier()
+}
+
+type GrpcService_EnvoyGrpc_ struct {
+ // Envoy's in-built gRPC client.
+ // See the :ref:`gRPC services overview `
+ // documentation for discussion on gRPC client selection.
+ EnvoyGrpc *GrpcService_EnvoyGrpc `protobuf:"bytes,1,opt,name=envoy_grpc,json=envoyGrpc,proto3,oneof"`
+}
+
+type GrpcService_GoogleGrpc_ struct {
+ // `Google C++ gRPC client `_
+ // See the :ref:`gRPC services overview `
+ // documentation for discussion on gRPC client selection.
+ GoogleGrpc *GrpcService_GoogleGrpc `protobuf:"bytes,2,opt,name=google_grpc,json=googleGrpc,proto3,oneof"`
+}
+
+func (*GrpcService_EnvoyGrpc_) isGrpcService_TargetSpecifier() {}
+
+func (*GrpcService_GoogleGrpc_) isGrpcService_TargetSpecifier() {}
+
+type GrpcService_EnvoyGrpc struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name of the upstream gRPC cluster. SSL credentials will be supplied
+ // in the :ref:`Cluster ` :ref:`transport_socket
+ // `.
+ ClusterName string `protobuf:"bytes,1,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
+ // The “:authority“ header in the grpc request. If this field is not set, the authority header value will be “cluster_name“.
+ // Note that this authority does not override the SNI. The SNI is provided by the transport socket of the cluster.
+ Authority string `protobuf:"bytes,2,opt,name=authority,proto3" json:"authority,omitempty"`
+ // Indicates the retry policy for re-establishing the gRPC stream
+ // This field is optional. If max interval is not provided, it will be set to ten times the provided base interval.
+ // Currently only supported for xDS gRPC streams.
+ // If not set, xDS gRPC streams default base interval:500ms, maximum interval:30s will be applied.
+ RetryPolicy *RetryPolicy `protobuf:"bytes,3,opt,name=retry_policy,json=retryPolicy,proto3" json:"retry_policy,omitempty"`
+}
+
+func (x *GrpcService_EnvoyGrpc) Reset() {
+ *x = GrpcService_EnvoyGrpc{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GrpcService_EnvoyGrpc) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcService_EnvoyGrpc) ProtoMessage() {}
+
+func (x *GrpcService_EnvoyGrpc) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcService_EnvoyGrpc.ProtoReflect.Descriptor instead.
+func (*GrpcService_EnvoyGrpc) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *GrpcService_EnvoyGrpc) GetClusterName() string {
+ if x != nil {
+ return x.ClusterName
+ }
+ return ""
+}
+
+func (x *GrpcService_EnvoyGrpc) GetAuthority() string {
+ if x != nil {
+ return x.Authority
+ }
+ return ""
+}
+
+func (x *GrpcService_EnvoyGrpc) GetRetryPolicy() *RetryPolicy {
+ if x != nil {
+ return x.RetryPolicy
+ }
+ return nil
+}
+
+// [#next-free-field: 9]
+type GrpcService_GoogleGrpc struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The target URI when using the `Google C++ gRPC client
+ // `_. SSL credentials will be supplied in
+ // :ref:`channel_credentials `.
+ TargetUri string `protobuf:"bytes,1,opt,name=target_uri,json=targetUri,proto3" json:"target_uri,omitempty"`
+ ChannelCredentials *GrpcService_GoogleGrpc_ChannelCredentials `protobuf:"bytes,2,opt,name=channel_credentials,json=channelCredentials,proto3" json:"channel_credentials,omitempty"`
+ // A set of call credentials that can be composed with `channel credentials
+ // `_.
+ CallCredentials []*GrpcService_GoogleGrpc_CallCredentials `protobuf:"bytes,3,rep,name=call_credentials,json=callCredentials,proto3" json:"call_credentials,omitempty"`
+ // The human readable prefix to use when emitting statistics for the gRPC
+ // service.
+ //
+ // .. csv-table::
+ //
+ // :header: Name, Type, Description
+ // :widths: 1, 1, 2
+ //
+ // streams_total, Counter, Total number of streams opened
+ // streams_closed_, Counter, Total streams closed with
+ StatPrefix string `protobuf:"bytes,4,opt,name=stat_prefix,json=statPrefix,proto3" json:"stat_prefix,omitempty"`
+ // The name of the Google gRPC credentials factory to use. This must have been registered with
+ // Envoy. If this is empty, a default credentials factory will be used that sets up channel
+ // credentials based on other configuration parameters.
+ CredentialsFactoryName string `protobuf:"bytes,5,opt,name=credentials_factory_name,json=credentialsFactoryName,proto3" json:"credentials_factory_name,omitempty"`
+ // Additional configuration for site-specific customizations of the Google
+ // gRPC library.
+ Config *structpb.Struct `protobuf:"bytes,6,opt,name=config,proto3" json:"config,omitempty"`
+ // How many bytes each stream can buffer internally.
+ // If not set an implementation defined default is applied (1MiB).
+ PerStreamBufferLimitBytes *wrapperspb.UInt32Value `protobuf:"bytes,7,opt,name=per_stream_buffer_limit_bytes,json=perStreamBufferLimitBytes,proto3" json:"per_stream_buffer_limit_bytes,omitempty"`
+ // Custom channels args.
+ ChannelArgs *GrpcService_GoogleGrpc_ChannelArgs `protobuf:"bytes,8,opt,name=channel_args,json=channelArgs,proto3" json:"channel_args,omitempty"`
+}
+
+func (x *GrpcService_GoogleGrpc) Reset() {
+ *x = GrpcService_GoogleGrpc{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GrpcService_GoogleGrpc) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcService_GoogleGrpc) ProtoMessage() {}
+
+func (x *GrpcService_GoogleGrpc) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcService_GoogleGrpc.ProtoReflect.Descriptor instead.
+func (*GrpcService_GoogleGrpc) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1}
+}
+
+func (x *GrpcService_GoogleGrpc) GetTargetUri() string {
+ if x != nil {
+ return x.TargetUri
+ }
+ return ""
+}
+
+func (x *GrpcService_GoogleGrpc) GetChannelCredentials() *GrpcService_GoogleGrpc_ChannelCredentials {
+ if x != nil {
+ return x.ChannelCredentials
+ }
+ return nil
+}
+
+func (x *GrpcService_GoogleGrpc) GetCallCredentials() []*GrpcService_GoogleGrpc_CallCredentials {
+ if x != nil {
+ return x.CallCredentials
+ }
+ return nil
+}
+
+func (x *GrpcService_GoogleGrpc) GetStatPrefix() string {
+ if x != nil {
+ return x.StatPrefix
+ }
+ return ""
+}
+
+func (x *GrpcService_GoogleGrpc) GetCredentialsFactoryName() string {
+ if x != nil {
+ return x.CredentialsFactoryName
+ }
+ return ""
+}
+
+func (x *GrpcService_GoogleGrpc) GetConfig() *structpb.Struct {
+ if x != nil {
+ return x.Config
+ }
+ return nil
+}
+
+func (x *GrpcService_GoogleGrpc) GetPerStreamBufferLimitBytes() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.PerStreamBufferLimitBytes
+ }
+ return nil
+}
+
+func (x *GrpcService_GoogleGrpc) GetChannelArgs() *GrpcService_GoogleGrpc_ChannelArgs {
+ if x != nil {
+ return x.ChannelArgs
+ }
+ return nil
+}
+
+// See https://grpc.io/grpc/cpp/structgrpc_1_1_ssl_credentials_options.html.
+type GrpcService_GoogleGrpc_SslCredentials struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // PEM encoded server root certificates.
+ RootCerts *DataSource `protobuf:"bytes,1,opt,name=root_certs,json=rootCerts,proto3" json:"root_certs,omitempty"`
+ // PEM encoded client private key.
+ PrivateKey *DataSource `protobuf:"bytes,2,opt,name=private_key,json=privateKey,proto3" json:"private_key,omitempty"`
+ // PEM encoded client certificate chain.
+ CertChain *DataSource `protobuf:"bytes,3,opt,name=cert_chain,json=certChain,proto3" json:"cert_chain,omitempty"`
+}
+
+func (x *GrpcService_GoogleGrpc_SslCredentials) Reset() {
+ *x = GrpcService_GoogleGrpc_SslCredentials{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GrpcService_GoogleGrpc_SslCredentials) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcService_GoogleGrpc_SslCredentials) ProtoMessage() {}
+
+func (x *GrpcService_GoogleGrpc_SslCredentials) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcService_GoogleGrpc_SslCredentials.ProtoReflect.Descriptor instead.
+func (*GrpcService_GoogleGrpc_SslCredentials) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 0}
+}
+
+func (x *GrpcService_GoogleGrpc_SslCredentials) GetRootCerts() *DataSource {
+ if x != nil {
+ return x.RootCerts
+ }
+ return nil
+}
+
+func (x *GrpcService_GoogleGrpc_SslCredentials) GetPrivateKey() *DataSource {
+ if x != nil {
+ return x.PrivateKey
+ }
+ return nil
+}
+
+func (x *GrpcService_GoogleGrpc_SslCredentials) GetCertChain() *DataSource {
+ if x != nil {
+ return x.CertChain
+ }
+ return nil
+}
+
+// Local channel credentials. Only UDS is supported for now.
+// See https://github.com/grpc/grpc/pull/15909.
+type GrpcService_GoogleGrpc_GoogleLocalCredentials struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *GrpcService_GoogleGrpc_GoogleLocalCredentials) Reset() {
+ *x = GrpcService_GoogleGrpc_GoogleLocalCredentials{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GrpcService_GoogleGrpc_GoogleLocalCredentials) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcService_GoogleGrpc_GoogleLocalCredentials) ProtoMessage() {}
+
+func (x *GrpcService_GoogleGrpc_GoogleLocalCredentials) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcService_GoogleGrpc_GoogleLocalCredentials.ProtoReflect.Descriptor instead.
+func (*GrpcService_GoogleGrpc_GoogleLocalCredentials) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 1}
+}
+
+// See https://grpc.io/docs/guides/auth.html#credential-types to understand Channel and Call
+// credential types.
+type GrpcService_GoogleGrpc_ChannelCredentials struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to CredentialSpecifier:
+ //
+ // *GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials
+ // *GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault
+ // *GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials
+ CredentialSpecifier isGrpcService_GoogleGrpc_ChannelCredentials_CredentialSpecifier `protobuf_oneof:"credential_specifier"`
+}
+
+func (x *GrpcService_GoogleGrpc_ChannelCredentials) Reset() {
+ *x = GrpcService_GoogleGrpc_ChannelCredentials{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GrpcService_GoogleGrpc_ChannelCredentials) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcService_GoogleGrpc_ChannelCredentials) ProtoMessage() {}
+
+func (x *GrpcService_GoogleGrpc_ChannelCredentials) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcService_GoogleGrpc_ChannelCredentials.ProtoReflect.Descriptor instead.
+func (*GrpcService_GoogleGrpc_ChannelCredentials) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 2}
+}
+
+func (m *GrpcService_GoogleGrpc_ChannelCredentials) GetCredentialSpecifier() isGrpcService_GoogleGrpc_ChannelCredentials_CredentialSpecifier {
+ if m != nil {
+ return m.CredentialSpecifier
+ }
+ return nil
+}
+
+func (x *GrpcService_GoogleGrpc_ChannelCredentials) GetSslCredentials() *GrpcService_GoogleGrpc_SslCredentials {
+ if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials); ok {
+ return x.SslCredentials
+ }
+ return nil
+}
+
+func (x *GrpcService_GoogleGrpc_ChannelCredentials) GetGoogleDefault() *emptypb.Empty {
+ if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault); ok {
+ return x.GoogleDefault
+ }
+ return nil
+}
+
+func (x *GrpcService_GoogleGrpc_ChannelCredentials) GetLocalCredentials() *GrpcService_GoogleGrpc_GoogleLocalCredentials {
+ if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials); ok {
+ return x.LocalCredentials
+ }
+ return nil
+}
+
+type isGrpcService_GoogleGrpc_ChannelCredentials_CredentialSpecifier interface {
+ isGrpcService_GoogleGrpc_ChannelCredentials_CredentialSpecifier()
+}
+
+type GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials struct {
+ SslCredentials *GrpcService_GoogleGrpc_SslCredentials `protobuf:"bytes,1,opt,name=ssl_credentials,json=sslCredentials,proto3,oneof"`
+}
+
+type GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault struct {
+ // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61
+ GoogleDefault *emptypb.Empty `protobuf:"bytes,2,opt,name=google_default,json=googleDefault,proto3,oneof"`
+}
+
+type GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials struct {
+ LocalCredentials *GrpcService_GoogleGrpc_GoogleLocalCredentials `protobuf:"bytes,3,opt,name=local_credentials,json=localCredentials,proto3,oneof"`
+}
+
+func (*GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials) isGrpcService_GoogleGrpc_ChannelCredentials_CredentialSpecifier() {
+}
+
+func (*GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault) isGrpcService_GoogleGrpc_ChannelCredentials_CredentialSpecifier() {
+}
+
+func (*GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials) isGrpcService_GoogleGrpc_ChannelCredentials_CredentialSpecifier() {
+}
+
+// [#next-free-field: 8]
+type GrpcService_GoogleGrpc_CallCredentials struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to CredentialSpecifier:
+ //
+ // *GrpcService_GoogleGrpc_CallCredentials_AccessToken
+ // *GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine
+ // *GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken
+ // *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess
+ // *GrpcService_GoogleGrpc_CallCredentials_GoogleIam
+ // *GrpcService_GoogleGrpc_CallCredentials_FromPlugin
+ // *GrpcService_GoogleGrpc_CallCredentials_StsService_
+ CredentialSpecifier isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier `protobuf_oneof:"credential_specifier"`
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials) Reset() {
+ *x = GrpcService_GoogleGrpc_CallCredentials{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcService_GoogleGrpc_CallCredentials) ProtoMessage() {}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcService_GoogleGrpc_CallCredentials.ProtoReflect.Descriptor instead.
+func (*GrpcService_GoogleGrpc_CallCredentials) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 3}
+}
+
+func (m *GrpcService_GoogleGrpc_CallCredentials) GetCredentialSpecifier() isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier {
+ if m != nil {
+ return m.CredentialSpecifier
+ }
+ return nil
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials) GetAccessToken() string {
+ if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_CallCredentials_AccessToken); ok {
+ return x.AccessToken
+ }
+ return ""
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials) GetGoogleComputeEngine() *emptypb.Empty {
+ if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine); ok {
+ return x.GoogleComputeEngine
+ }
+ return nil
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials) GetGoogleRefreshToken() string {
+ if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken); ok {
+ return x.GoogleRefreshToken
+ }
+ return ""
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials) GetServiceAccountJwtAccess() *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials {
+ if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess); ok {
+ return x.ServiceAccountJwtAccess
+ }
+ return nil
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials) GetGoogleIam() *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials {
+ if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_CallCredentials_GoogleIam); ok {
+ return x.GoogleIam
+ }
+ return nil
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials) GetFromPlugin() *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin {
+ if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_CallCredentials_FromPlugin); ok {
+ return x.FromPlugin
+ }
+ return nil
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials) GetStsService() *GrpcService_GoogleGrpc_CallCredentials_StsService {
+ if x, ok := x.GetCredentialSpecifier().(*GrpcService_GoogleGrpc_CallCredentials_StsService_); ok {
+ return x.StsService
+ }
+ return nil
+}
+
+type isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier interface {
+ isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier()
+}
+
+type GrpcService_GoogleGrpc_CallCredentials_AccessToken struct {
+ // Access token credentials.
+ // https://grpc.io/grpc/cpp/namespacegrpc.html#ad3a80da696ffdaea943f0f858d7a360d.
+ AccessToken string `protobuf:"bytes,1,opt,name=access_token,json=accessToken,proto3,oneof"`
+}
+
+type GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine struct {
+ // Google Compute Engine credentials.
+ // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61
+ GoogleComputeEngine *emptypb.Empty `protobuf:"bytes,2,opt,name=google_compute_engine,json=googleComputeEngine,proto3,oneof"`
+}
+
+type GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken struct {
+ // Google refresh token credentials.
+ // https://grpc.io/grpc/cpp/namespacegrpc.html#a96901c997b91bc6513b08491e0dca37c.
+ GoogleRefreshToken string `protobuf:"bytes,3,opt,name=google_refresh_token,json=googleRefreshToken,proto3,oneof"`
+}
+
+type GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess struct {
+ // Service Account JWT Access credentials.
+ // https://grpc.io/grpc/cpp/namespacegrpc.html#a92a9f959d6102461f66ee973d8e9d3aa.
+ ServiceAccountJwtAccess *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials `protobuf:"bytes,4,opt,name=service_account_jwt_access,json=serviceAccountJwtAccess,proto3,oneof"`
+}
+
+type GrpcService_GoogleGrpc_CallCredentials_GoogleIam struct {
+ // Google IAM credentials.
+ // https://grpc.io/grpc/cpp/namespacegrpc.html#a9fc1fc101b41e680d47028166e76f9d0.
+ GoogleIam *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials `protobuf:"bytes,5,opt,name=google_iam,json=googleIam,proto3,oneof"`
+}
+
+type GrpcService_GoogleGrpc_CallCredentials_FromPlugin struct {
+ // Custom authenticator credentials.
+ // https://grpc.io/grpc/cpp/namespacegrpc.html#a823c6a4b19ffc71fb33e90154ee2ad07.
+ // https://grpc.io/docs/guides/auth.html#extending-grpc-to-support-other-authentication-mechanisms.
+ FromPlugin *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin `protobuf:"bytes,6,opt,name=from_plugin,json=fromPlugin,proto3,oneof"`
+}
+
+type GrpcService_GoogleGrpc_CallCredentials_StsService_ struct {
+ // Custom security token service which implements OAuth 2.0 token exchange.
+ // https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16
+ // See https://github.com/grpc/grpc/pull/19587.
+ StsService *GrpcService_GoogleGrpc_CallCredentials_StsService `protobuf:"bytes,7,opt,name=sts_service,json=stsService,proto3,oneof"`
+}
+
+func (*GrpcService_GoogleGrpc_CallCredentials_AccessToken) isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() {
+}
+
+func (*GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine) isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() {
+}
+
+func (*GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken) isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() {
+}
+
+func (*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess) isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() {
+}
+
+func (*GrpcService_GoogleGrpc_CallCredentials_GoogleIam) isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() {
+}
+
+func (*GrpcService_GoogleGrpc_CallCredentials_FromPlugin) isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() {
+}
+
+func (*GrpcService_GoogleGrpc_CallCredentials_StsService_) isGrpcService_GoogleGrpc_CallCredentials_CredentialSpecifier() {
+}
+
+// Channel arguments.
+type GrpcService_GoogleGrpc_ChannelArgs struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // See grpc_types.h GRPC_ARG #defines for keys that work here.
+ Args map[string]*GrpcService_GoogleGrpc_ChannelArgs_Value `protobuf:"bytes,1,rep,name=args,proto3" json:"args,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *GrpcService_GoogleGrpc_ChannelArgs) Reset() {
+ *x = GrpcService_GoogleGrpc_ChannelArgs{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GrpcService_GoogleGrpc_ChannelArgs) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcService_GoogleGrpc_ChannelArgs) ProtoMessage() {}
+
+func (x *GrpcService_GoogleGrpc_ChannelArgs) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcService_GoogleGrpc_ChannelArgs.ProtoReflect.Descriptor instead.
+func (*GrpcService_GoogleGrpc_ChannelArgs) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 4}
+}
+
+func (x *GrpcService_GoogleGrpc_ChannelArgs) GetArgs() map[string]*GrpcService_GoogleGrpc_ChannelArgs_Value {
+ if x != nil {
+ return x.Args
+ }
+ return nil
+}
+
+type GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ JsonKey string `protobuf:"bytes,1,opt,name=json_key,json=jsonKey,proto3" json:"json_key,omitempty"`
+ TokenLifetimeSeconds uint64 `protobuf:"varint,2,opt,name=token_lifetime_seconds,json=tokenLifetimeSeconds,proto3" json:"token_lifetime_seconds,omitempty"`
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) Reset() {
+ *x = GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) ProtoMessage() {}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials.ProtoReflect.Descriptor instead.
+func (*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 3, 0}
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) GetJsonKey() string {
+ if x != nil {
+ return x.JsonKey
+ }
+ return ""
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) GetTokenLifetimeSeconds() uint64 {
+ if x != nil {
+ return x.TokenLifetimeSeconds
+ }
+ return 0
+}
+
+type GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ AuthorizationToken string `protobuf:"bytes,1,opt,name=authorization_token,json=authorizationToken,proto3" json:"authorization_token,omitempty"`
+ AuthoritySelector string `protobuf:"bytes,2,opt,name=authority_selector,json=authoritySelector,proto3" json:"authority_selector,omitempty"`
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) Reset() {
+ *x = GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) ProtoMessage() {}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials.ProtoReflect.Descriptor instead.
+func (*GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 3, 1}
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) GetAuthorizationToken() string {
+ if x != nil {
+ return x.AuthorizationToken
+ }
+ return ""
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) GetAuthoritySelector() string {
+ if x != nil {
+ return x.AuthoritySelector
+ }
+ return ""
+}
+
+type GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // [#extension-category: envoy.grpc_credentials]
+ //
+ // Types that are assignable to ConfigType:
+ //
+ // *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig
+ ConfigType isGrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_ConfigType `protobuf_oneof:"config_type"`
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) Reset() {
+ *x = GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) ProtoMessage() {}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin.ProtoReflect.Descriptor instead.
+func (*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 3, 2}
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) GetConfigType() isGrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_ConfigType {
+ if m != nil {
+ return m.ConfigType
+ }
+ return nil
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) GetTypedConfig() *anypb.Any {
+ if x, ok := x.GetConfigType().(*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig); ok {
+ return x.TypedConfig
+ }
+ return nil
+}
+
+type isGrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_ConfigType interface {
+ isGrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_ConfigType()
+}
+
+type GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig struct {
+ TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"`
+}
+
+func (*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig) isGrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_ConfigType() {
+}
+
+// Security token service configuration that allows Google gRPC to
+// fetch security token from an OAuth 2.0 authorization server.
+// See https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 and
+// https://github.com/grpc/grpc/pull/19587.
+// [#next-free-field: 10]
+type GrpcService_GoogleGrpc_CallCredentials_StsService struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // URI of the token exchange service that handles token exchange requests.
+ // [#comment:TODO(asraa): Add URI validation when implemented. Tracked by
+ // https://github.com/bufbuild/protoc-gen-validate/issues/303]
+ TokenExchangeServiceUri string `protobuf:"bytes,1,opt,name=token_exchange_service_uri,json=tokenExchangeServiceUri,proto3" json:"token_exchange_service_uri,omitempty"`
+ // Location of the target service or resource where the client
+ // intends to use the requested security token.
+ Resource string `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource,omitempty"`
+ // Logical name of the target service where the client intends to
+ // use the requested security token.
+ Audience string `protobuf:"bytes,3,opt,name=audience,proto3" json:"audience,omitempty"`
+ // The desired scope of the requested security token in the
+ // context of the service or resource where the token will be used.
+ Scope string `protobuf:"bytes,4,opt,name=scope,proto3" json:"scope,omitempty"`
+ // Type of the requested security token.
+ RequestedTokenType string `protobuf:"bytes,5,opt,name=requested_token_type,json=requestedTokenType,proto3" json:"requested_token_type,omitempty"`
+ // The path of subject token, a security token that represents the
+ // identity of the party on behalf of whom the request is being made.
+ SubjectTokenPath string `protobuf:"bytes,6,opt,name=subject_token_path,json=subjectTokenPath,proto3" json:"subject_token_path,omitempty"`
+ // Type of the subject token.
+ SubjectTokenType string `protobuf:"bytes,7,opt,name=subject_token_type,json=subjectTokenType,proto3" json:"subject_token_type,omitempty"`
+ // The path of actor token, a security token that represents the identity
+ // of the acting party. The acting party is authorized to use the
+ // requested security token and act on behalf of the subject.
+ ActorTokenPath string `protobuf:"bytes,8,opt,name=actor_token_path,json=actorTokenPath,proto3" json:"actor_token_path,omitempty"`
+ // Type of the actor token.
+ ActorTokenType string `protobuf:"bytes,9,opt,name=actor_token_type,json=actorTokenType,proto3" json:"actor_token_type,omitempty"`
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) Reset() {
+ *x = GrpcService_GoogleGrpc_CallCredentials_StsService{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcService_GoogleGrpc_CallCredentials_StsService) ProtoMessage() {}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcService_GoogleGrpc_CallCredentials_StsService.ProtoReflect.Descriptor instead.
+func (*GrpcService_GoogleGrpc_CallCredentials_StsService) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 3, 3}
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetTokenExchangeServiceUri() string {
+ if x != nil {
+ return x.TokenExchangeServiceUri
+ }
+ return ""
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetResource() string {
+ if x != nil {
+ return x.Resource
+ }
+ return ""
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetAudience() string {
+ if x != nil {
+ return x.Audience
+ }
+ return ""
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetScope() string {
+ if x != nil {
+ return x.Scope
+ }
+ return ""
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetRequestedTokenType() string {
+ if x != nil {
+ return x.RequestedTokenType
+ }
+ return ""
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetSubjectTokenPath() string {
+ if x != nil {
+ return x.SubjectTokenPath
+ }
+ return ""
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetSubjectTokenType() string {
+ if x != nil {
+ return x.SubjectTokenType
+ }
+ return ""
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetActorTokenPath() string {
+ if x != nil {
+ return x.ActorTokenPath
+ }
+ return ""
+}
+
+func (x *GrpcService_GoogleGrpc_CallCredentials_StsService) GetActorTokenType() string {
+ if x != nil {
+ return x.ActorTokenType
+ }
+ return ""
+}
+
+type GrpcService_GoogleGrpc_ChannelArgs_Value struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Pointer values are not supported, since they don't make any sense when
+ // delivered via the API.
+ //
+ // Types that are assignable to ValueSpecifier:
+ //
+ // *GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue
+ // *GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue
+ ValueSpecifier isGrpcService_GoogleGrpc_ChannelArgs_Value_ValueSpecifier `protobuf_oneof:"value_specifier"`
+}
+
+func (x *GrpcService_GoogleGrpc_ChannelArgs_Value) Reset() {
+ *x = GrpcService_GoogleGrpc_ChannelArgs_Value{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GrpcService_GoogleGrpc_ChannelArgs_Value) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcService_GoogleGrpc_ChannelArgs_Value) ProtoMessage() {}
+
+func (x *GrpcService_GoogleGrpc_ChannelArgs_Value) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_grpc_service_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcService_GoogleGrpc_ChannelArgs_Value.ProtoReflect.Descriptor instead.
+func (*GrpcService_GoogleGrpc_ChannelArgs_Value) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP(), []int{0, 1, 4, 0}
+}
+
+func (m *GrpcService_GoogleGrpc_ChannelArgs_Value) GetValueSpecifier() isGrpcService_GoogleGrpc_ChannelArgs_Value_ValueSpecifier {
+ if m != nil {
+ return m.ValueSpecifier
+ }
+ return nil
+}
+
+func (x *GrpcService_GoogleGrpc_ChannelArgs_Value) GetStringValue() string {
+ if x, ok := x.GetValueSpecifier().(*GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue); ok {
+ return x.StringValue
+ }
+ return ""
+}
+
+func (x *GrpcService_GoogleGrpc_ChannelArgs_Value) GetIntValue() int64 {
+ if x, ok := x.GetValueSpecifier().(*GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue); ok {
+ return x.IntValue
+ }
+ return 0
+}
+
+type isGrpcService_GoogleGrpc_ChannelArgs_Value_ValueSpecifier interface {
+ isGrpcService_GoogleGrpc_ChannelArgs_Value_ValueSpecifier()
+}
+
+type GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue struct {
+ StringValue string `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue struct {
+ IntValue int64 `protobuf:"varint,2,opt,name=int_value,json=intValue,proto3,oneof"`
+}
+
+func (*GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue) isGrpcService_GoogleGrpc_ChannelArgs_Value_ValueSpecifier() {
+}
+
+func (*GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue) isGrpcService_GoogleGrpc_ChannelArgs_Value_ValueSpecifier() {
+}
+
+var File_envoy_config_core_v3_grpc_service_proto protoreflect.FileDescriptor
+
+var file_envoy_config_core_v3_grpc_service_proto_rawDesc = []byte{
+ 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a,
+ 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f,
+ 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70,
+ 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69,
+ 0x76, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69,
+ 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x22, 0xd2, 0x21, 0x0a, 0x0b, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x12, 0x4c, 0x0a, 0x0a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x5f, 0x67, 0x72, 0x70,
+ 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47,
+ 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x45, 0x6e, 0x76, 0x6f, 0x79,
+ 0x47, 0x72, 0x70, 0x63, 0x48, 0x00, 0x52, 0x09, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x47, 0x72, 0x70,
+ 0x63, 0x12, 0x4f, 0x0a, 0x0b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x67, 0x72, 0x70, 0x63,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72,
+ 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x47, 0x72, 0x70, 0x63, 0x48, 0x00, 0x52, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72,
+ 0x70, 0x63, 0x12, 0x33, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07,
+ 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x4c, 0x0a, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69,
+ 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4d, 0x65, 0x74,
+ 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0xde, 0x01, 0x0a, 0x09, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x47,
+ 0x72, 0x70, 0x63, 0x12, 0x2a, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02,
+ 0x10, 0x01, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12,
+ 0x2f, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x11, 0xfa, 0x42, 0x0e, 0x72, 0x0c, 0x10, 0x00, 0x28, 0x80, 0x80, 0x01, 0xc0,
+ 0x01, 0x02, 0xc8, 0x01, 0x00, 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79,
+ 0x12, 0x44, 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65,
+ 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79,
+ 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65,
+ 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x45, 0x6e, 0x76,
+ 0x6f, 0x79, 0x47, 0x72, 0x70, 0x63, 0x1a, 0xfa, 0x1c, 0x0a, 0x0a, 0x47, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x47, 0x72, 0x70, 0x63, 0x12, 0x26, 0x0a, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f,
+ 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02,
+ 0x10, 0x01, 0x52, 0x09, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x55, 0x72, 0x69, 0x12, 0x70, 0x0a,
+ 0x13, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74,
+ 0x69, 0x61, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76,
+ 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c,
+ 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x12, 0x63, 0x68, 0x61,
+ 0x6e, 0x6e, 0x65, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12,
+ 0x67, 0x0a, 0x10, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69,
+ 0x61, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33,
+ 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64,
+ 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x0f, 0x63, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65,
+ 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x28, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74,
+ 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa,
+ 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x50, 0x72, 0x65, 0x66,
+ 0x69, 0x78, 0x12, 0x38, 0x0a, 0x18, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c,
+ 0x73, 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c,
+ 0x73, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53,
+ 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x5e, 0x0a,
+ 0x1d, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x62, 0x75, 0x66, 0x66,
+ 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x07,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c,
+ 0x75, 0x65, 0x52, 0x19, 0x70, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x42, 0x75, 0x66,
+ 0x66, 0x65, 0x72, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x5b, 0x0a,
+ 0x0c, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x61, 0x72, 0x67, 0x73, 0x18, 0x08, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70,
+ 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, 0x52, 0x0b, 0x63,
+ 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, 0x1a, 0x9d, 0x02, 0x0a, 0x0e, 0x53,
+ 0x73, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x3f, 0x0a,
+ 0x0a, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x52, 0x09, 0x72, 0x6f, 0x6f, 0x74, 0x43, 0x65, 0x72, 0x74, 0x73, 0x12, 0x49,
+ 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x06, 0xb8, 0xb7, 0x8b, 0xa4, 0x02, 0x01, 0x52, 0x0a, 0x70,
+ 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x3f, 0x0a, 0x0a, 0x63, 0x65, 0x72,
+ 0x74, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72,
+ 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52,
+ 0x09, 0x63, 0x65, 0x72, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x3a, 0x3e, 0x9a, 0xc5, 0x88, 0x1e,
+ 0x39, 0x0a, 0x37, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e,
+ 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x73, 0x6c, 0x43,
+ 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x1a, 0x60, 0x0a, 0x16, 0x47, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74,
+ 0x69, 0x61, 0x6c, 0x73, 0x3a, 0x46, 0x9a, 0xc5, 0x88, 0x1e, 0x41, 0x0a, 0x3f, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47,
+ 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x4c, 0x6f, 0x63, 0x61,
+ 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x1a, 0x92, 0x03, 0x0a,
+ 0x12, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69,
+ 0x61, 0x6c, 0x73, 0x12, 0x66, 0x0a, 0x0f, 0x73, 0x73, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65,
+ 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65,
+ 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e,
+ 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x73, 0x6c, 0x43, 0x72,
+ 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x0e, 0x73, 0x73, 0x6c,
+ 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x3f, 0x0a, 0x0e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x0d, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x72, 0x0a, 0x11,
+ 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c,
+ 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47,
+ 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x4c, 0x6f, 0x63, 0x61,
+ 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x10,
+ 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73,
+ 0x3a, 0x42, 0x9a, 0xc5, 0x88, 0x1e, 0x3d, 0x0a, 0x3b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61,
+ 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70,
+ 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74,
+ 0x69, 0x61, 0x6c, 0x73, 0x42, 0x1b, 0x0a, 0x14, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69,
+ 0x61, 0x6c, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42,
+ 0x01, 0x1a, 0x88, 0x0f, 0x0a, 0x0f, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e,
+ 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x23, 0x0a, 0x0c, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f,
+ 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x61,
+ 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x4c, 0x0a, 0x15, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x5f, 0x65, 0x6e, 0x67,
+ 0x69, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74,
+ 0x79, 0x48, 0x00, 0x52, 0x13, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x75,
+ 0x74, 0x65, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x32, 0x0a, 0x14, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x5f, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x12, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x9e, 0x01, 0x0a,
+ 0x1a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74,
+ 0x5f, 0x6a, 0x77, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x5f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e,
+ 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e,
+ 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x4a, 0x57,
+ 0x54, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61,
+ 0x6c, 0x73, 0x48, 0x00, 0x52, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63,
+ 0x6f, 0x75, 0x6e, 0x74, 0x4a, 0x77, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x72, 0x0a,
+ 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x69, 0x61, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x51, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e,
+ 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e,
+ 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x49, 0x41, 0x4d, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74,
+ 0x69, 0x61, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x09, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x49, 0x61,
+ 0x6d, 0x12, 0x7d, 0x0a, 0x0b, 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e,
+ 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x5a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72,
+ 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74,
+ 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x43, 0x72, 0x65,
+ 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x72, 0x6f, 0x6d, 0x50, 0x6c, 0x75, 0x67,
+ 0x69, 0x6e, 0x48, 0x00, 0x52, 0x0a, 0x66, 0x72, 0x6f, 0x6d, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e,
+ 0x12, 0x6a, 0x0a, 0x0b, 0x73, 0x74, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18,
+ 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70,
+ 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47,
+ 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69,
+ 0x61, 0x6c, 0x73, 0x2e, 0x53, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x00,
+ 0x52, 0x0a, 0x73, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, 0xd9, 0x01, 0x0a,
+ 0x22, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x4a,
+ 0x57, 0x54, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69,
+ 0x61, 0x6c, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6a, 0x73, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x34,
+ 0x0a, 0x16, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x6c, 0x69, 0x66, 0x65, 0x74, 0x69, 0x6d, 0x65,
+ 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14,
+ 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x4c, 0x69, 0x66, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x63,
+ 0x6f, 0x6e, 0x64, 0x73, 0x3a, 0x62, 0x9a, 0xc5, 0x88, 0x1e, 0x5d, 0x0a, 0x5b, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47,
+ 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e,
+ 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63,
+ 0x6f, 0x75, 0x6e, 0x74, 0x4a, 0x57, 0x54, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x72, 0x65,
+ 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x1a, 0xcc, 0x01, 0x0a, 0x14, 0x47, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x49, 0x41, 0x4d, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c,
+ 0x73, 0x12, 0x2f, 0x0a, 0x13, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12,
+ 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b,
+ 0x65, 0x6e, 0x12, 0x2d, 0x0a, 0x12, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f,
+ 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11,
+ 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f,
+ 0x72, 0x3a, 0x54, 0x9a, 0xc5, 0x88, 0x1e, 0x4f, 0x0a, 0x4d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63,
+ 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72,
+ 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61,
+ 0x6c, 0x73, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x49, 0x41, 0x4d, 0x43, 0x72, 0x65, 0x64,
+ 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x1a, 0xea, 0x01, 0x0a, 0x1d, 0x4d, 0x65, 0x74, 0x61,
+ 0x64, 0x61, 0x74, 0x61, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x46,
+ 0x72, 0x6f, 0x6d, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a,
+ 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70,
+ 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x5d, 0x9a, 0xc5, 0x88, 0x1e, 0x58, 0x0a,
+ 0x56, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72,
+ 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
+ 0x74, 0x61, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x46, 0x72, 0x6f,
+ 0x6d, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x1a, 0xd7, 0x03, 0x0a, 0x0a, 0x53, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x12, 0x3b, 0x0a, 0x1a, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x65, 0x78, 0x63,
+ 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x75, 0x72,
+ 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x45, 0x78,
+ 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x72, 0x69,
+ 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08,
+ 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
+ 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70,
+ 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x30,
+ 0x0a, 0x14, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x6f, 0x6b, 0x65,
+ 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x72, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x79, 0x70, 0x65,
+ 0x12, 0x35, 0x0a, 0x12, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x6f, 0x6b, 0x65,
+ 0x6e, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42,
+ 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x10, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x6f,
+ 0x6b, 0x65, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x12, 0x35, 0x0a, 0x12, 0x73, 0x75, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x10, 0x73, 0x75,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x28,
+ 0x0a, 0x10, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x70, 0x61,
+ 0x74, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x54,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x12, 0x28, 0x0a, 0x10, 0x61, 0x63, 0x74, 0x6f,
+ 0x72, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0e, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x54, 0x79,
+ 0x70, 0x65, 0x3a, 0x4a, 0x9a, 0xc5, 0x88, 0x1e, 0x45, 0x0a, 0x43, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70,
+ 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47,
+ 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69,
+ 0x61, 0x6c, 0x73, 0x2e, 0x53, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x3a, 0x3f,
+ 0x9a, 0xc5, 0x88, 0x1e, 0x3a, 0x0a, 0x38, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69,
+ 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e,
+ 0x43, 0x61, 0x6c, 0x6c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x42,
+ 0x1b, 0x0a, 0x14, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x70,
+ 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xc3, 0x02, 0x0a,
+ 0x0b, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67, 0x73, 0x12, 0x56, 0x0a, 0x04,
+ 0x61, 0x72, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76,
+ 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c,
+ 0x41, 0x72, 0x67, 0x73, 0x2e, 0x41, 0x72, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04,
+ 0x61, 0x72, 0x67, 0x73, 0x1a, 0x63, 0x0a, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a,
+ 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c,
+ 0x75, 0x65, 0x12, 0x1d, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x42, 0x16, 0x0a, 0x0f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69,
+ 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x77, 0x0a, 0x09, 0x41, 0x72, 0x67,
+ 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x54, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x47,
+ 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x41, 0x72, 0x67,
+ 0x73, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02,
+ 0x38, 0x01, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72, 0x70,
+ 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x47,
+ 0x72, 0x70, 0x63, 0x3a, 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72,
+ 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x17, 0x0a, 0x10, 0x74, 0x61, 0x72,
+ 0x67, 0x65, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8,
+ 0x42, 0x01, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x42, 0x84, 0x01, 0x0a, 0x22, 0x69, 0x6f, 0x2e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42,
+ 0x10, 0x47, 0x72, 0x70, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33,
+ 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62,
+ 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_config_core_v3_grpc_service_proto_rawDescOnce sync.Once
+ file_envoy_config_core_v3_grpc_service_proto_rawDescData = file_envoy_config_core_v3_grpc_service_proto_rawDesc
+)
+
+func file_envoy_config_core_v3_grpc_service_proto_rawDescGZIP() []byte {
+ file_envoy_config_core_v3_grpc_service_proto_rawDescOnce.Do(func() {
+ file_envoy_config_core_v3_grpc_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_grpc_service_proto_rawDescData)
+ })
+ return file_envoy_config_core_v3_grpc_service_proto_rawDescData
+}
+
+var file_envoy_config_core_v3_grpc_service_proto_msgTypes = make([]protoimpl.MessageInfo, 14)
+var file_envoy_config_core_v3_grpc_service_proto_goTypes = []interface{}{
+ (*GrpcService)(nil), // 0: envoy.config.core.v3.GrpcService
+ (*GrpcService_EnvoyGrpc)(nil), // 1: envoy.config.core.v3.GrpcService.EnvoyGrpc
+ (*GrpcService_GoogleGrpc)(nil), // 2: envoy.config.core.v3.GrpcService.GoogleGrpc
+ (*GrpcService_GoogleGrpc_SslCredentials)(nil), // 3: envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials
+ (*GrpcService_GoogleGrpc_GoogleLocalCredentials)(nil), // 4: envoy.config.core.v3.GrpcService.GoogleGrpc.GoogleLocalCredentials
+ (*GrpcService_GoogleGrpc_ChannelCredentials)(nil), // 5: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials
+ (*GrpcService_GoogleGrpc_CallCredentials)(nil), // 6: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials
+ (*GrpcService_GoogleGrpc_ChannelArgs)(nil), // 7: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs
+ (*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials)(nil), // 8: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.ServiceAccountJWTAccessCredentials
+ (*GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials)(nil), // 9: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.GoogleIAMCredentials
+ (*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin)(nil), // 10: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.MetadataCredentialsFromPlugin
+ (*GrpcService_GoogleGrpc_CallCredentials_StsService)(nil), // 11: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.StsService
+ (*GrpcService_GoogleGrpc_ChannelArgs_Value)(nil), // 12: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.Value
+ nil, // 13: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.ArgsEntry
+ (*durationpb.Duration)(nil), // 14: google.protobuf.Duration
+ (*HeaderValue)(nil), // 15: envoy.config.core.v3.HeaderValue
+ (*RetryPolicy)(nil), // 16: envoy.config.core.v3.RetryPolicy
+ (*structpb.Struct)(nil), // 17: google.protobuf.Struct
+ (*wrapperspb.UInt32Value)(nil), // 18: google.protobuf.UInt32Value
+ (*DataSource)(nil), // 19: envoy.config.core.v3.DataSource
+ (*emptypb.Empty)(nil), // 20: google.protobuf.Empty
+ (*anypb.Any)(nil), // 21: google.protobuf.Any
+}
+var file_envoy_config_core_v3_grpc_service_proto_depIdxs = []int32{
+ 1, // 0: envoy.config.core.v3.GrpcService.envoy_grpc:type_name -> envoy.config.core.v3.GrpcService.EnvoyGrpc
+ 2, // 1: envoy.config.core.v3.GrpcService.google_grpc:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc
+ 14, // 2: envoy.config.core.v3.GrpcService.timeout:type_name -> google.protobuf.Duration
+ 15, // 3: envoy.config.core.v3.GrpcService.initial_metadata:type_name -> envoy.config.core.v3.HeaderValue
+ 16, // 4: envoy.config.core.v3.GrpcService.EnvoyGrpc.retry_policy:type_name -> envoy.config.core.v3.RetryPolicy
+ 5, // 5: envoy.config.core.v3.GrpcService.GoogleGrpc.channel_credentials:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials
+ 6, // 6: envoy.config.core.v3.GrpcService.GoogleGrpc.call_credentials:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials
+ 17, // 7: envoy.config.core.v3.GrpcService.GoogleGrpc.config:type_name -> google.protobuf.Struct
+ 18, // 8: envoy.config.core.v3.GrpcService.GoogleGrpc.per_stream_buffer_limit_bytes:type_name -> google.protobuf.UInt32Value
+ 7, // 9: envoy.config.core.v3.GrpcService.GoogleGrpc.channel_args:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs
+ 19, // 10: envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials.root_certs:type_name -> envoy.config.core.v3.DataSource
+ 19, // 11: envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials.private_key:type_name -> envoy.config.core.v3.DataSource
+ 19, // 12: envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials.cert_chain:type_name -> envoy.config.core.v3.DataSource
+ 3, // 13: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials.ssl_credentials:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials
+ 20, // 14: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials.google_default:type_name -> google.protobuf.Empty
+ 4, // 15: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials.local_credentials:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.GoogleLocalCredentials
+ 20, // 16: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.google_compute_engine:type_name -> google.protobuf.Empty
+ 8, // 17: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.service_account_jwt_access:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.ServiceAccountJWTAccessCredentials
+ 9, // 18: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.google_iam:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.GoogleIAMCredentials
+ 10, // 19: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.from_plugin:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.MetadataCredentialsFromPlugin
+ 11, // 20: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.sts_service:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.StsService
+ 13, // 21: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.args:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.ArgsEntry
+ 21, // 22: envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.MetadataCredentialsFromPlugin.typed_config:type_name -> google.protobuf.Any
+ 12, // 23: envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.ArgsEntry.value:type_name -> envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.Value
+ 24, // [24:24] is the sub-list for method output_type
+ 24, // [24:24] is the sub-list for method input_type
+ 24, // [24:24] is the sub-list for extension type_name
+ 24, // [24:24] is the sub-list for extension extendee
+ 0, // [0:24] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_core_v3_grpc_service_proto_init() }
+func file_envoy_config_core_v3_grpc_service_proto_init() {
+ if File_envoy_config_core_v3_grpc_service_proto != nil {
+ return
+ }
+ file_envoy_config_core_v3_base_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_core_v3_grpc_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GrpcService); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_grpc_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GrpcService_EnvoyGrpc); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_grpc_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GrpcService_GoogleGrpc); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_grpc_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GrpcService_GoogleGrpc_SslCredentials); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_grpc_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GrpcService_GoogleGrpc_GoogleLocalCredentials); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_grpc_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GrpcService_GoogleGrpc_ChannelCredentials); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_grpc_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GrpcService_GoogleGrpc_CallCredentials); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_grpc_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GrpcService_GoogleGrpc_ChannelArgs); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_grpc_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_grpc_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_grpc_service_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_grpc_service_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GrpcService_GoogleGrpc_CallCredentials_StsService); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_grpc_service_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GrpcService_GoogleGrpc_ChannelArgs_Value); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_envoy_config_core_v3_grpc_service_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*GrpcService_EnvoyGrpc_)(nil),
+ (*GrpcService_GoogleGrpc_)(nil),
+ }
+ file_envoy_config_core_v3_grpc_service_proto_msgTypes[5].OneofWrappers = []interface{}{
+ (*GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials)(nil),
+ (*GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault)(nil),
+ (*GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials)(nil),
+ }
+ file_envoy_config_core_v3_grpc_service_proto_msgTypes[6].OneofWrappers = []interface{}{
+ (*GrpcService_GoogleGrpc_CallCredentials_AccessToken)(nil),
+ (*GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine)(nil),
+ (*GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken)(nil),
+ (*GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess)(nil),
+ (*GrpcService_GoogleGrpc_CallCredentials_GoogleIam)(nil),
+ (*GrpcService_GoogleGrpc_CallCredentials_FromPlugin)(nil),
+ (*GrpcService_GoogleGrpc_CallCredentials_StsService_)(nil),
+ }
+ file_envoy_config_core_v3_grpc_service_proto_msgTypes[10].OneofWrappers = []interface{}{
+ (*GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig)(nil),
+ }
+ file_envoy_config_core_v3_grpc_service_proto_msgTypes[12].OneofWrappers = []interface{}{
+ (*GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue)(nil),
+ (*GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_core_v3_grpc_service_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 14,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_core_v3_grpc_service_proto_goTypes,
+ DependencyIndexes: file_envoy_config_core_v3_grpc_service_proto_depIdxs,
+ MessageInfos: file_envoy_config_core_v3_grpc_service_proto_msgTypes,
+ }.Build()
+ File_envoy_config_core_v3_grpc_service_proto = out.File
+ file_envoy_config_core_v3_grpc_service_proto_rawDesc = nil
+ file_envoy_config_core_v3_grpc_service_proto_goTypes = nil
+ file_envoy_config_core_v3_grpc_service_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/grpc_service.pb.validate.go b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/grpc_service.pb.validate.go
new file mode 100644
index 000000000..8fd252375
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/grpc_service.pb.validate.go
@@ -0,0 +1,2518 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/core/v3/grpc_service.proto
+
+package corev3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on GrpcService with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *GrpcService) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on GrpcService with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in GrpcServiceMultiError, or
+// nil if none found.
+func (m *GrpcService) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *GrpcService) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetTimeout()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, GrpcServiceValidationError{
+ field: "Timeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, GrpcServiceValidationError{
+ field: "Timeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetTimeout()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcServiceValidationError{
+ field: "Timeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ for idx, item := range m.GetInitialMetadata() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, GrpcServiceValidationError{
+ field: fmt.Sprintf("InitialMetadata[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, GrpcServiceValidationError{
+ field: fmt.Sprintf("InitialMetadata[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcServiceValidationError{
+ field: fmt.Sprintf("InitialMetadata[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ oneofTargetSpecifierPresent := false
+ switch v := m.TargetSpecifier.(type) {
+ case *GrpcService_EnvoyGrpc_:
+ if v == nil {
+ err := GrpcServiceValidationError{
+ field: "TargetSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofTargetSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetEnvoyGrpc()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, GrpcServiceValidationError{
+ field: "EnvoyGrpc",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, GrpcServiceValidationError{
+ field: "EnvoyGrpc",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetEnvoyGrpc()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcServiceValidationError{
+ field: "EnvoyGrpc",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *GrpcService_GoogleGrpc_:
+ if v == nil {
+ err := GrpcServiceValidationError{
+ field: "TargetSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofTargetSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetGoogleGrpc()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, GrpcServiceValidationError{
+ field: "GoogleGrpc",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, GrpcServiceValidationError{
+ field: "GoogleGrpc",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetGoogleGrpc()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcServiceValidationError{
+ field: "GoogleGrpc",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofTargetSpecifierPresent {
+ err := GrpcServiceValidationError{
+ field: "TargetSpecifier",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return GrpcServiceMultiError(errors)
+ }
+
+ return nil
+}
+
+// GrpcServiceMultiError is an error wrapping multiple validation errors
+// returned by GrpcService.ValidateAll() if the designated constraints aren't met.
+type GrpcServiceMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m GrpcServiceMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m GrpcServiceMultiError) AllErrors() []error { return m }
+
+// GrpcServiceValidationError is the validation error returned by
+// GrpcService.Validate if the designated constraints aren't met.
+type GrpcServiceValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e GrpcServiceValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e GrpcServiceValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e GrpcServiceValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e GrpcServiceValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e GrpcServiceValidationError) ErrorName() string { return "GrpcServiceValidationError" }
+
+// Error satisfies the builtin error interface
+func (e GrpcServiceValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sGrpcService.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = GrpcServiceValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = GrpcServiceValidationError{}
+
+// NOTE(review): generated by protoc-gen-validate; do not hand-edit — regenerate from the .proto instead.
+//
+// Validate checks the field values on GrpcService_EnvoyGrpc with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *GrpcService_EnvoyGrpc) Validate() error {
+	return m.validate(false)
+}
+
+// ValidateAll checks the field values on GrpcService_EnvoyGrpc with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// GrpcService_EnvoyGrpcMultiError, or nil if none found.
+func (m *GrpcService_EnvoyGrpc) ValidateAll() error {
+	return m.validate(true)
+}
+
+// validate applies the field rules; with all==false it returns on the first
+// violation, with all==true it accumulates every violation into a MultiError.
+func (m *GrpcService_EnvoyGrpc) validate(all bool) error {
+	if m == nil {
+		return nil
+	}
+
+	var errors []error
+
+	if utf8.RuneCountInString(m.GetClusterName()) < 1 {
+		err := GrpcService_EnvoyGrpcValidationError{
+			field:  "ClusterName",
+			reason: "value length must be at least 1 runes",
+		}
+		if !all {
+			return err
+		}
+		errors = append(errors, err)
+	}
+
+	// NOTE(review): RuneCountInString never returns a negative value, so this
+	// branch is unreachable — a generator artifact of a `min_len: 0` rule.
+	if utf8.RuneCountInString(m.GetAuthority()) < 0 {
+		err := GrpcService_EnvoyGrpcValidationError{
+			field:  "Authority",
+			reason: "value length must be at least 0 runes",
+		}
+		if !all {
+			return err
+		}
+		errors = append(errors, err)
+	}
+
+	// Byte-length bound (len), as opposed to the rune count above.
+	if len(m.GetAuthority()) > 16384 {
+		err := GrpcService_EnvoyGrpcValidationError{
+			field:  "Authority",
+			reason: "value length must be at most 16384 bytes",
+		}
+		if !all {
+			return err
+		}
+		errors = append(errors, err)
+	}
+
+	if !_GrpcService_EnvoyGrpc_Authority_Pattern.MatchString(m.GetAuthority()) {
+		err := GrpcService_EnvoyGrpcValidationError{
+			field:  "Authority",
+			reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"",
+		}
+		if !all {
+			return err
+		}
+		errors = append(errors, err)
+	}
+
+	if all {
+		switch v := interface{}(m.GetRetryPolicy()).(type) {
+		case interface{ ValidateAll() error }:
+			if err := v.ValidateAll(); err != nil {
+				errors = append(errors, GrpcService_EnvoyGrpcValidationError{
+					field:  "RetryPolicy",
+					reason: "embedded message failed validation",
+					cause:  err,
+				})
+			}
+		case interface{ Validate() error }:
+			if err := v.Validate(); err != nil {
+				errors = append(errors, GrpcService_EnvoyGrpcValidationError{
+					field:  "RetryPolicy",
+					reason: "embedded message failed validation",
+					cause:  err,
+				})
+			}
+		}
+	} else if v, ok := interface{}(m.GetRetryPolicy()).(interface{ Validate() error }); ok {
+		if err := v.Validate(); err != nil {
+			return GrpcService_EnvoyGrpcValidationError{
+				field:  "RetryPolicy",
+				reason: "embedded message failed validation",
+				cause:  err,
+			}
+		}
+	}
+
+	if len(errors) > 0 {
+		return GrpcService_EnvoyGrpcMultiError(errors)
+	}
+
+	return nil
+}
+
+// GrpcService_EnvoyGrpcMultiError is an error wrapping multiple validation
+// errors returned by GrpcService_EnvoyGrpc.ValidateAll() if the designated
+// constraints aren't met.
+type GrpcService_EnvoyGrpcMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m GrpcService_EnvoyGrpcMultiError) Error() string {
+	var msgs []string
+	for _, err := range m {
+		msgs = append(msgs, err.Error())
+	}
+	return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m GrpcService_EnvoyGrpcMultiError) AllErrors() []error { return m }
+
+// GrpcService_EnvoyGrpcValidationError is the validation error returned by
+// GrpcService_EnvoyGrpc.Validate if the designated constraints aren't met.
+type GrpcService_EnvoyGrpcValidationError struct {
+	field  string
+	reason string
+	cause  error
+	key    bool
+}
+
+// Field function returns field value.
+func (e GrpcService_EnvoyGrpcValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e GrpcService_EnvoyGrpcValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e GrpcService_EnvoyGrpcValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e GrpcService_EnvoyGrpcValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e GrpcService_EnvoyGrpcValidationError) ErrorName() string {
+	return "GrpcService_EnvoyGrpcValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e GrpcService_EnvoyGrpcValidationError) Error() string {
+	cause := ""
+	if e.cause != nil {
+		cause = fmt.Sprintf(" | caused by: %v", e.cause)
+	}
+
+	key := ""
+	if e.key {
+		key = "key for "
+	}
+
+	return fmt.Sprintf(
+		"invalid %sGrpcService_EnvoyGrpc.%s: %s%s",
+		key,
+		e.field,
+		e.reason,
+		cause)
+}
+
+var _ error = GrpcService_EnvoyGrpcValidationError{}
+
+var _ interface {
+	Field() string
+	Reason() string
+	Key() bool
+	Cause() error
+	ErrorName() string
+} = GrpcService_EnvoyGrpcValidationError{}
+
+// Pattern rejecting NUL, LF, and CR anywhere in the Authority value.
+var _GrpcService_EnvoyGrpc_Authority_Pattern = regexp.MustCompile("^[^\x00\n\r]*$")
+
+// NOTE(review): generated by protoc-gen-validate; do not hand-edit — regenerate from the .proto instead.
+//
+// Validate checks the field values on GrpcService_GoogleGrpc with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *GrpcService_GoogleGrpc) Validate() error {
+	return m.validate(false)
+}
+
+// ValidateAll checks the field values on GrpcService_GoogleGrpc with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// GrpcService_GoogleGrpcMultiError, or nil if none found.
+func (m *GrpcService_GoogleGrpc) ValidateAll() error {
+	return m.validate(true)
+}
+
+// validate applies the field rules; with all==false it returns on the first
+// violation, with all==true it accumulates every violation into a MultiError.
+func (m *GrpcService_GoogleGrpc) validate(all bool) error {
+	if m == nil {
+		return nil
+	}
+
+	var errors []error
+
+	if utf8.RuneCountInString(m.GetTargetUri()) < 1 {
+		err := GrpcService_GoogleGrpcValidationError{
+			field:  "TargetUri",
+			reason: "value length must be at least 1 runes",
+		}
+		if !all {
+			return err
+		}
+		errors = append(errors, err)
+	}
+
+	if all {
+		switch v := interface{}(m.GetChannelCredentials()).(type) {
+		case interface{ ValidateAll() error }:
+			if err := v.ValidateAll(); err != nil {
+				errors = append(errors, GrpcService_GoogleGrpcValidationError{
+					field:  "ChannelCredentials",
+					reason: "embedded message failed validation",
+					cause:  err,
+				})
+			}
+		case interface{ Validate() error }:
+			if err := v.Validate(); err != nil {
+				errors = append(errors, GrpcService_GoogleGrpcValidationError{
+					field:  "ChannelCredentials",
+					reason: "embedded message failed validation",
+					cause:  err,
+				})
+			}
+		}
+	} else if v, ok := interface{}(m.GetChannelCredentials()).(interface{ Validate() error }); ok {
+		if err := v.Validate(); err != nil {
+			return GrpcService_GoogleGrpcValidationError{
+				field:  "ChannelCredentials",
+				reason: "embedded message failed validation",
+				cause:  err,
+			}
+		}
+	}
+
+	// Each repeated CallCredentials entry is validated individually; the
+	// index is embedded in the reported field name.
+	for idx, item := range m.GetCallCredentials() {
+		_, _ = idx, item
+
+		if all {
+			switch v := interface{}(item).(type) {
+			case interface{ ValidateAll() error }:
+				if err := v.ValidateAll(); err != nil {
+					errors = append(errors, GrpcService_GoogleGrpcValidationError{
+						field:  fmt.Sprintf("CallCredentials[%v]", idx),
+						reason: "embedded message failed validation",
+						cause:  err,
+					})
+				}
+			case interface{ Validate() error }:
+				if err := v.Validate(); err != nil {
+					errors = append(errors, GrpcService_GoogleGrpcValidationError{
+						field:  fmt.Sprintf("CallCredentials[%v]", idx),
+						reason: "embedded message failed validation",
+						cause:  err,
+					})
+				}
+			}
+		} else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+			if err := v.Validate(); err != nil {
+				return GrpcService_GoogleGrpcValidationError{
+					field:  fmt.Sprintf("CallCredentials[%v]", idx),
+					reason: "embedded message failed validation",
+					cause:  err,
+				}
+			}
+		}
+
+	}
+
+	if utf8.RuneCountInString(m.GetStatPrefix()) < 1 {
+		err := GrpcService_GoogleGrpcValidationError{
+			field:  "StatPrefix",
+			reason: "value length must be at least 1 runes",
+		}
+		if !all {
+			return err
+		}
+		errors = append(errors, err)
+	}
+
+	// no validation rules for CredentialsFactoryName
+
+	if all {
+		switch v := interface{}(m.GetConfig()).(type) {
+		case interface{ ValidateAll() error }:
+			if err := v.ValidateAll(); err != nil {
+				errors = append(errors, GrpcService_GoogleGrpcValidationError{
+					field:  "Config",
+					reason: "embedded message failed validation",
+					cause:  err,
+				})
+			}
+		case interface{ Validate() error }:
+			if err := v.Validate(); err != nil {
+				errors = append(errors, GrpcService_GoogleGrpcValidationError{
+					field:  "Config",
+					reason: "embedded message failed validation",
+					cause:  err,
+				})
+			}
+		}
+	} else if v, ok := interface{}(m.GetConfig()).(interface{ Validate() error }); ok {
+		if err := v.Validate(); err != nil {
+			return GrpcService_GoogleGrpcValidationError{
+				field:  "Config",
+				reason: "embedded message failed validation",
+				cause:  err,
+			}
+		}
+	}
+
+	if all {
+		switch v := interface{}(m.GetPerStreamBufferLimitBytes()).(type) {
+		case interface{ ValidateAll() error }:
+			if err := v.ValidateAll(); err != nil {
+				errors = append(errors, GrpcService_GoogleGrpcValidationError{
+					field:  "PerStreamBufferLimitBytes",
+					reason: "embedded message failed validation",
+					cause:  err,
+				})
+			}
+		case interface{ Validate() error }:
+			if err := v.Validate(); err != nil {
+				errors = append(errors, GrpcService_GoogleGrpcValidationError{
+					field:  "PerStreamBufferLimitBytes",
+					reason: "embedded message failed validation",
+					cause:  err,
+				})
+			}
+		}
+	} else if v, ok := interface{}(m.GetPerStreamBufferLimitBytes()).(interface{ Validate() error }); ok {
+		if err := v.Validate(); err != nil {
+			return GrpcService_GoogleGrpcValidationError{
+				field:  "PerStreamBufferLimitBytes",
+				reason: "embedded message failed validation",
+				cause:  err,
+			}
+		}
+	}
+
+	if all {
+		switch v := interface{}(m.GetChannelArgs()).(type) {
+		case interface{ ValidateAll() error }:
+			if err := v.ValidateAll(); err != nil {
+				errors = append(errors, GrpcService_GoogleGrpcValidationError{
+					field:  "ChannelArgs",
+					reason: "embedded message failed validation",
+					cause:  err,
+				})
+			}
+		case interface{ Validate() error }:
+			if err := v.Validate(); err != nil {
+				errors = append(errors, GrpcService_GoogleGrpcValidationError{
+					field:  "ChannelArgs",
+					reason: "embedded message failed validation",
+					cause:  err,
+				})
+			}
+		}
+	} else if v, ok := interface{}(m.GetChannelArgs()).(interface{ Validate() error }); ok {
+		if err := v.Validate(); err != nil {
+			return GrpcService_GoogleGrpcValidationError{
+				field:  "ChannelArgs",
+				reason: "embedded message failed validation",
+				cause:  err,
+			}
+		}
+	}
+
+	if len(errors) > 0 {
+		return GrpcService_GoogleGrpcMultiError(errors)
+	}
+
+	return nil
+}
+
+// GrpcService_GoogleGrpcMultiError is an error wrapping multiple validation
+// errors returned by GrpcService_GoogleGrpc.ValidateAll() if the designated
+// constraints aren't met.
+type GrpcService_GoogleGrpcMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m GrpcService_GoogleGrpcMultiError) Error() string {
+	var msgs []string
+	for _, err := range m {
+		msgs = append(msgs, err.Error())
+	}
+	return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m GrpcService_GoogleGrpcMultiError) AllErrors() []error { return m }
+
+// GrpcService_GoogleGrpcValidationError is the validation error returned by
+// GrpcService_GoogleGrpc.Validate if the designated constraints aren't met.
+type GrpcService_GoogleGrpcValidationError struct {
+	field  string
+	reason string
+	cause  error
+	key    bool
+}
+
+// Field function returns field value.
+func (e GrpcService_GoogleGrpcValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e GrpcService_GoogleGrpcValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e GrpcService_GoogleGrpcValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e GrpcService_GoogleGrpcValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e GrpcService_GoogleGrpcValidationError) ErrorName() string {
+	return "GrpcService_GoogleGrpcValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e GrpcService_GoogleGrpcValidationError) Error() string {
+	cause := ""
+	if e.cause != nil {
+		cause = fmt.Sprintf(" | caused by: %v", e.cause)
+	}
+
+	key := ""
+	if e.key {
+		key = "key for "
+	}
+
+	return fmt.Sprintf(
+		"invalid %sGrpcService_GoogleGrpc.%s: %s%s",
+		key,
+		e.field,
+		e.reason,
+		cause)
+}
+
+var _ error = GrpcService_GoogleGrpcValidationError{}
+
+var _ interface {
+	Field() string
+	Reason() string
+	Key() bool
+	Cause() error
+	ErrorName() string
+} = GrpcService_GoogleGrpcValidationError{}
+
+// NOTE(review): generated by protoc-gen-validate; do not hand-edit — regenerate from the .proto instead.
+//
+// Validate checks the field values on GrpcService_GoogleGrpc_SslCredentials
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, the first error encountered is returned, or nil if
+// there are no violations.
+func (m *GrpcService_GoogleGrpc_SslCredentials) Validate() error {
+	return m.validate(false)
+}
+
+// ValidateAll checks the field values on GrpcService_GoogleGrpc_SslCredentials
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, the result is a list of violation errors wrapped in
+// GrpcService_GoogleGrpc_SslCredentialsMultiError, or nil if none found.
+func (m *GrpcService_GoogleGrpc_SslCredentials) ValidateAll() error {
+	return m.validate(true)
+}
+
+// validate recursively validates the three embedded credential messages;
+// with all==false it returns on the first violation, with all==true it
+// accumulates every violation into a MultiError.
+func (m *GrpcService_GoogleGrpc_SslCredentials) validate(all bool) error {
+	if m == nil {
+		return nil
+	}
+
+	var errors []error
+
+	if all {
+		switch v := interface{}(m.GetRootCerts()).(type) {
+		case interface{ ValidateAll() error }:
+			if err := v.ValidateAll(); err != nil {
+				errors = append(errors, GrpcService_GoogleGrpc_SslCredentialsValidationError{
+					field:  "RootCerts",
+					reason: "embedded message failed validation",
+					cause:  err,
+				})
+			}
+		case interface{ Validate() error }:
+			if err := v.Validate(); err != nil {
+				errors = append(errors, GrpcService_GoogleGrpc_SslCredentialsValidationError{
+					field:  "RootCerts",
+					reason: "embedded message failed validation",
+					cause:  err,
+				})
+			}
+		}
+	} else if v, ok := interface{}(m.GetRootCerts()).(interface{ Validate() error }); ok {
+		if err := v.Validate(); err != nil {
+			return GrpcService_GoogleGrpc_SslCredentialsValidationError{
+				field:  "RootCerts",
+				reason: "embedded message failed validation",
+				cause:  err,
+			}
+		}
+	}
+
+	if all {
+		switch v := interface{}(m.GetPrivateKey()).(type) {
+		case interface{ ValidateAll() error }:
+			if err := v.ValidateAll(); err != nil {
+				errors = append(errors, GrpcService_GoogleGrpc_SslCredentialsValidationError{
+					field:  "PrivateKey",
+					reason: "embedded message failed validation",
+					cause:  err,
+				})
+			}
+		case interface{ Validate() error }:
+			if err := v.Validate(); err != nil {
+				errors = append(errors, GrpcService_GoogleGrpc_SslCredentialsValidationError{
+					field:  "PrivateKey",
+					reason: "embedded message failed validation",
+					cause:  err,
+				})
+			}
+		}
+	} else if v, ok := interface{}(m.GetPrivateKey()).(interface{ Validate() error }); ok {
+		if err := v.Validate(); err != nil {
+			return GrpcService_GoogleGrpc_SslCredentialsValidationError{
+				field:  "PrivateKey",
+				reason: "embedded message failed validation",
+				cause:  err,
+			}
+		}
+	}
+
+	if all {
+		switch v := interface{}(m.GetCertChain()).(type) {
+		case interface{ ValidateAll() error }:
+			if err := v.ValidateAll(); err != nil {
+				errors = append(errors, GrpcService_GoogleGrpc_SslCredentialsValidationError{
+					field:  "CertChain",
+					reason: "embedded message failed validation",
+					cause:  err,
+				})
+			}
+		case interface{ Validate() error }:
+			if err := v.Validate(); err != nil {
+				errors = append(errors, GrpcService_GoogleGrpc_SslCredentialsValidationError{
+					field:  "CertChain",
+					reason: "embedded message failed validation",
+					cause:  err,
+				})
+			}
+		}
+	} else if v, ok := interface{}(m.GetCertChain()).(interface{ Validate() error }); ok {
+		if err := v.Validate(); err != nil {
+			return GrpcService_GoogleGrpc_SslCredentialsValidationError{
+				field:  "CertChain",
+				reason: "embedded message failed validation",
+				cause:  err,
+			}
+		}
+	}
+
+	if len(errors) > 0 {
+		return GrpcService_GoogleGrpc_SslCredentialsMultiError(errors)
+	}
+
+	return nil
+}
+
+// GrpcService_GoogleGrpc_SslCredentialsMultiError is an error wrapping
+// multiple validation errors returned by
+// GrpcService_GoogleGrpc_SslCredentials.ValidateAll() if the designated
+// constraints aren't met.
+type GrpcService_GoogleGrpc_SslCredentialsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m GrpcService_GoogleGrpc_SslCredentialsMultiError) Error() string {
+	var msgs []string
+	for _, err := range m {
+		msgs = append(msgs, err.Error())
+	}
+	return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m GrpcService_GoogleGrpc_SslCredentialsMultiError) AllErrors() []error { return m }
+
+// GrpcService_GoogleGrpc_SslCredentialsValidationError is the validation error
+// returned by GrpcService_GoogleGrpc_SslCredentials.Validate if the
+// designated constraints aren't met.
+type GrpcService_GoogleGrpc_SslCredentialsValidationError struct {
+	field  string
+	reason string
+	cause  error
+	key    bool
+}
+
+// Field function returns field value.
+func (e GrpcService_GoogleGrpc_SslCredentialsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e GrpcService_GoogleGrpc_SslCredentialsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e GrpcService_GoogleGrpc_SslCredentialsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e GrpcService_GoogleGrpc_SslCredentialsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e GrpcService_GoogleGrpc_SslCredentialsValidationError) ErrorName() string {
+	return "GrpcService_GoogleGrpc_SslCredentialsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e GrpcService_GoogleGrpc_SslCredentialsValidationError) Error() string {
+	cause := ""
+	if e.cause != nil {
+		cause = fmt.Sprintf(" | caused by: %v", e.cause)
+	}
+
+	key := ""
+	if e.key {
+		key = "key for "
+	}
+
+	return fmt.Sprintf(
+		"invalid %sGrpcService_GoogleGrpc_SslCredentials.%s: %s%s",
+		key,
+		e.field,
+		e.reason,
+		cause)
+}
+
+var _ error = GrpcService_GoogleGrpc_SslCredentialsValidationError{}
+
+var _ interface {
+	Field() string
+	Reason() string
+	Key() bool
+	Cause() error
+	ErrorName() string
+} = GrpcService_GoogleGrpc_SslCredentialsValidationError{}
+
+// NOTE(review): generated by protoc-gen-validate; do not hand-edit — regenerate from the .proto instead.
+//
+// Validate checks the field values on
+// GrpcService_GoogleGrpc_GoogleLocalCredentials with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *GrpcService_GoogleGrpc_GoogleLocalCredentials) Validate() error {
+	return m.validate(false)
+}
+
+// ValidateAll checks the field values on
+// GrpcService_GoogleGrpc_GoogleLocalCredentials with the rules defined in the
+// proto definition for this message. If any rules are violated, the result is
+// a list of violation errors wrapped in
+// GrpcService_GoogleGrpc_GoogleLocalCredentialsMultiError, or nil if none found.
+func (m *GrpcService_GoogleGrpc_GoogleLocalCredentials) ValidateAll() error {
+	return m.validate(true)
+}
+
+// validate is a no-op beyond the nil check: the message declares no
+// validation rules, so errors always stays empty and nil is returned.
+func (m *GrpcService_GoogleGrpc_GoogleLocalCredentials) validate(all bool) error {
+	if m == nil {
+		return nil
+	}
+
+	var errors []error
+
+	if len(errors) > 0 {
+		return GrpcService_GoogleGrpc_GoogleLocalCredentialsMultiError(errors)
+	}
+
+	return nil
+}
+
+// GrpcService_GoogleGrpc_GoogleLocalCredentialsMultiError is an error wrapping
+// multiple validation errors returned by
+// GrpcService_GoogleGrpc_GoogleLocalCredentials.ValidateAll() if the
+// designated constraints aren't met.
+type GrpcService_GoogleGrpc_GoogleLocalCredentialsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m GrpcService_GoogleGrpc_GoogleLocalCredentialsMultiError) Error() string {
+	var msgs []string
+	for _, err := range m {
+		msgs = append(msgs, err.Error())
+	}
+	return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m GrpcService_GoogleGrpc_GoogleLocalCredentialsMultiError) AllErrors() []error { return m }
+
+// GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError is the
+// validation error returned by
+// GrpcService_GoogleGrpc_GoogleLocalCredentials.Validate if the designated
+// constraints aren't met.
+type GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError struct {
+	field  string
+	reason string
+	cause  error
+	key    bool
+}
+
+// Field function returns field value.
+func (e GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError) Reason() string {
+	return e.reason
+}
+
+// Cause function returns cause value.
+func (e GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError) ErrorName() string {
+	return "GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError) Error() string {
+	cause := ""
+	if e.cause != nil {
+		cause = fmt.Sprintf(" | caused by: %v", e.cause)
+	}
+
+	key := ""
+	if e.key {
+		key = "key for "
+	}
+
+	return fmt.Sprintf(
+		"invalid %sGrpcService_GoogleGrpc_GoogleLocalCredentials.%s: %s%s",
+		key,
+		e.field,
+		e.reason,
+		cause)
+}
+
+var _ error = GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError{}
+
+var _ interface {
+	Field() string
+	Reason() string
+	Key() bool
+	Cause() error
+	ErrorName() string
+} = GrpcService_GoogleGrpc_GoogleLocalCredentialsValidationError{}
+
+// NOTE(review): generated by protoc-gen-validate; do not hand-edit — regenerate from the .proto instead.
+//
+// Validate checks the field values on
+// GrpcService_GoogleGrpc_ChannelCredentials with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *GrpcService_GoogleGrpc_ChannelCredentials) Validate() error {
+	return m.validate(false)
+}
+
+// ValidateAll checks the field values on
+// GrpcService_GoogleGrpc_ChannelCredentials with the rules defined in the
+// proto definition for this message. If any rules are violated, the result is
+// a list of violation errors wrapped in
+// GrpcService_GoogleGrpc_ChannelCredentialsMultiError, or nil if none found.
+func (m *GrpcService_GoogleGrpc_ChannelCredentials) ValidateAll() error {
+	return m.validate(true)
+}
+
+// validate enforces the required `credential_specifier` oneof: exactly one of
+// SslCredentials, GoogleDefault, or LocalCredentials must be set and non-nil,
+// and the chosen message is validated recursively. With all==false the first
+// violation is returned; with all==true violations accumulate into a MultiError.
+func (m *GrpcService_GoogleGrpc_ChannelCredentials) validate(all bool) error {
+	if m == nil {
+		return nil
+	}
+
+	var errors []error
+
+	oneofCredentialSpecifierPresent := false
+	switch v := m.CredentialSpecifier.(type) {
+	case *GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials:
+		// A typed-nil wrapper counts as set-but-invalid, not as absent.
+		if v == nil {
+			err := GrpcService_GoogleGrpc_ChannelCredentialsValidationError{
+				field:  "CredentialSpecifier",
+				reason: "oneof value cannot be a typed-nil",
+			}
+			if !all {
+				return err
+			}
+			errors = append(errors, err)
+		}
+		oneofCredentialSpecifierPresent = true
+
+		if all {
+			switch v := interface{}(m.GetSslCredentials()).(type) {
+			case interface{ ValidateAll() error }:
+				if err := v.ValidateAll(); err != nil {
+					errors = append(errors, GrpcService_GoogleGrpc_ChannelCredentialsValidationError{
+						field:  "SslCredentials",
+						reason: "embedded message failed validation",
+						cause:  err,
+					})
+				}
+			case interface{ Validate() error }:
+				if err := v.Validate(); err != nil {
+					errors = append(errors, GrpcService_GoogleGrpc_ChannelCredentialsValidationError{
+						field:  "SslCredentials",
+						reason: "embedded message failed validation",
+						cause:  err,
+					})
+				}
+			}
+		} else if v, ok := interface{}(m.GetSslCredentials()).(interface{ Validate() error }); ok {
+			if err := v.Validate(); err != nil {
+				return GrpcService_GoogleGrpc_ChannelCredentialsValidationError{
+					field:  "SslCredentials",
+					reason: "embedded message failed validation",
+					cause:  err,
+				}
+			}
+		}
+
+	case *GrpcService_GoogleGrpc_ChannelCredentials_GoogleDefault:
+		if v == nil {
+			err := GrpcService_GoogleGrpc_ChannelCredentialsValidationError{
+				field:  "CredentialSpecifier",
+				reason: "oneof value cannot be a typed-nil",
+			}
+			if !all {
+				return err
+			}
+			errors = append(errors, err)
+		}
+		oneofCredentialSpecifierPresent = true
+
+		if all {
+			switch v := interface{}(m.GetGoogleDefault()).(type) {
+			case interface{ ValidateAll() error }:
+				if err := v.ValidateAll(); err != nil {
+					errors = append(errors, GrpcService_GoogleGrpc_ChannelCredentialsValidationError{
+						field:  "GoogleDefault",
+						reason: "embedded message failed validation",
+						cause:  err,
+					})
+				}
+			case interface{ Validate() error }:
+				if err := v.Validate(); err != nil {
+					errors = append(errors, GrpcService_GoogleGrpc_ChannelCredentialsValidationError{
+						field:  "GoogleDefault",
+						reason: "embedded message failed validation",
+						cause:  err,
+					})
+				}
+			}
+		} else if v, ok := interface{}(m.GetGoogleDefault()).(interface{ Validate() error }); ok {
+			if err := v.Validate(); err != nil {
+				return GrpcService_GoogleGrpc_ChannelCredentialsValidationError{
+					field:  "GoogleDefault",
+					reason: "embedded message failed validation",
+					cause:  err,
+				}
+			}
+		}
+
+	case *GrpcService_GoogleGrpc_ChannelCredentials_LocalCredentials:
+		if v == nil {
+			err := GrpcService_GoogleGrpc_ChannelCredentialsValidationError{
+				field:  "CredentialSpecifier",
+				reason: "oneof value cannot be a typed-nil",
+			}
+			if !all {
+				return err
+			}
+			errors = append(errors, err)
+		}
+		oneofCredentialSpecifierPresent = true
+
+		if all {
+			switch v := interface{}(m.GetLocalCredentials()).(type) {
+			case interface{ ValidateAll() error }:
+				if err := v.ValidateAll(); err != nil {
+					errors = append(errors, GrpcService_GoogleGrpc_ChannelCredentialsValidationError{
+						field:  "LocalCredentials",
+						reason: "embedded message failed validation",
+						cause:  err,
+					})
+				}
+			case interface{ Validate() error }:
+				if err := v.Validate(); err != nil {
+					errors = append(errors, GrpcService_GoogleGrpc_ChannelCredentialsValidationError{
+						field:  "LocalCredentials",
+						reason: "embedded message failed validation",
+						cause:  err,
+					})
+				}
+			}
+		} else if v, ok := interface{}(m.GetLocalCredentials()).(interface{ Validate() error }); ok {
+			if err := v.Validate(); err != nil {
+				return GrpcService_GoogleGrpc_ChannelCredentialsValidationError{
+					field:  "LocalCredentials",
+					reason: "embedded message failed validation",
+					cause:  err,
+				}
+			}
+		}
+
+	default:
+		_ = v // ensures v is used
+	}
+	if !oneofCredentialSpecifierPresent {
+		err := GrpcService_GoogleGrpc_ChannelCredentialsValidationError{
+			field:  "CredentialSpecifier",
+			reason: "value is required",
+		}
+		if !all {
+			return err
+		}
+		errors = append(errors, err)
+	}
+
+	if len(errors) > 0 {
+		return GrpcService_GoogleGrpc_ChannelCredentialsMultiError(errors)
+	}
+
+	return nil
+}
+
+// GrpcService_GoogleGrpc_ChannelCredentialsMultiError is an error wrapping
+// multiple validation errors returned by
+// GrpcService_GoogleGrpc_ChannelCredentials.ValidateAll() if the designated
+// constraints aren't met.
+type GrpcService_GoogleGrpc_ChannelCredentialsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m GrpcService_GoogleGrpc_ChannelCredentialsMultiError) Error() string {
+	var msgs []string
+	for _, err := range m {
+		msgs = append(msgs, err.Error())
+	}
+	return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m GrpcService_GoogleGrpc_ChannelCredentialsMultiError) AllErrors() []error { return m }
+
+// GrpcService_GoogleGrpc_ChannelCredentialsValidationError is the validation
+// error returned by GrpcService_GoogleGrpc_ChannelCredentials.Validate if the
+// designated constraints aren't met.
+type GrpcService_GoogleGrpc_ChannelCredentialsValidationError struct {
+	field  string
+	reason string
+	cause  error
+	key    bool
+}
+
+// Field function returns field value.
+func (e GrpcService_GoogleGrpc_ChannelCredentialsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e GrpcService_GoogleGrpc_ChannelCredentialsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e GrpcService_GoogleGrpc_ChannelCredentialsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e GrpcService_GoogleGrpc_ChannelCredentialsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e GrpcService_GoogleGrpc_ChannelCredentialsValidationError) ErrorName() string {
+	return "GrpcService_GoogleGrpc_ChannelCredentialsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e GrpcService_GoogleGrpc_ChannelCredentialsValidationError) Error() string {
+	cause := ""
+	if e.cause != nil {
+		cause = fmt.Sprintf(" | caused by: %v", e.cause)
+	}
+
+	key := ""
+	if e.key {
+		key = "key for "
+	}
+
+	return fmt.Sprintf(
+		"invalid %sGrpcService_GoogleGrpc_ChannelCredentials.%s: %s%s",
+		key,
+		e.field,
+		e.reason,
+		cause)
+}
+
+var _ error = GrpcService_GoogleGrpc_ChannelCredentialsValidationError{}
+
+var _ interface {
+	Field() string
+	Reason() string
+	Key() bool
+	Cause() error
+	ErrorName() string
+} = GrpcService_GoogleGrpc_ChannelCredentialsValidationError{}
+
+// NOTE(review): generated by protoc-gen-validate; do not hand-edit — regenerate from the .proto instead.
+//
+// Validate checks the field values on GrpcService_GoogleGrpc_CallCredentials
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, the first error encountered is returned, or nil if
+// there are no violations.
+func (m *GrpcService_GoogleGrpc_CallCredentials) Validate() error {
+	return m.validate(false)
+}
+
+// ValidateAll checks the field values on
+// GrpcService_GoogleGrpc_CallCredentials with the rules defined in the proto
+// definition for this message. If any rules are violated, the result is a
+// list of violation errors wrapped in
+// GrpcService_GoogleGrpc_CallCredentialsMultiError, or nil if none found.
+func (m *GrpcService_GoogleGrpc_CallCredentials) ValidateAll() error {
+	return m.validate(true)
+}
+
+func (m *GrpcService_GoogleGrpc_CallCredentials) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ oneofCredentialSpecifierPresent := false
+ switch v := m.CredentialSpecifier.(type) {
+ case *GrpcService_GoogleGrpc_CallCredentials_AccessToken:
+ if v == nil {
+ err := GrpcService_GoogleGrpc_CallCredentialsValidationError{
+ field: "CredentialSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofCredentialSpecifierPresent = true
+ // no validation rules for AccessToken
+ case *GrpcService_GoogleGrpc_CallCredentials_GoogleComputeEngine:
+ if v == nil {
+ err := GrpcService_GoogleGrpc_CallCredentialsValidationError{
+ field: "CredentialSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofCredentialSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetGoogleComputeEngine()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{
+ field: "GoogleComputeEngine",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{
+ field: "GoogleComputeEngine",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetGoogleComputeEngine()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcService_GoogleGrpc_CallCredentialsValidationError{
+ field: "GoogleComputeEngine",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *GrpcService_GoogleGrpc_CallCredentials_GoogleRefreshToken:
+ if v == nil {
+ err := GrpcService_GoogleGrpc_CallCredentialsValidationError{
+ field: "CredentialSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofCredentialSpecifierPresent = true
+ // no validation rules for GoogleRefreshToken
+ case *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJwtAccess:
+ if v == nil {
+ err := GrpcService_GoogleGrpc_CallCredentialsValidationError{
+ field: "CredentialSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofCredentialSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetServiceAccountJwtAccess()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{
+ field: "ServiceAccountJwtAccess",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{
+ field: "ServiceAccountJwtAccess",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetServiceAccountJwtAccess()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcService_GoogleGrpc_CallCredentialsValidationError{
+ field: "ServiceAccountJwtAccess",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *GrpcService_GoogleGrpc_CallCredentials_GoogleIam:
+ if v == nil {
+ err := GrpcService_GoogleGrpc_CallCredentialsValidationError{
+ field: "CredentialSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofCredentialSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetGoogleIam()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{
+ field: "GoogleIam",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{
+ field: "GoogleIam",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetGoogleIam()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcService_GoogleGrpc_CallCredentialsValidationError{
+ field: "GoogleIam",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *GrpcService_GoogleGrpc_CallCredentials_FromPlugin:
+ if v == nil {
+ err := GrpcService_GoogleGrpc_CallCredentialsValidationError{
+ field: "CredentialSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofCredentialSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetFromPlugin()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{
+ field: "FromPlugin",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{
+ field: "FromPlugin",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetFromPlugin()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcService_GoogleGrpc_CallCredentialsValidationError{
+ field: "FromPlugin",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *GrpcService_GoogleGrpc_CallCredentials_StsService_:
+ if v == nil {
+ err := GrpcService_GoogleGrpc_CallCredentialsValidationError{
+ field: "CredentialSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofCredentialSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetStsService()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{
+ field: "StsService",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, GrpcService_GoogleGrpc_CallCredentialsValidationError{
+ field: "StsService",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetStsService()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcService_GoogleGrpc_CallCredentialsValidationError{
+ field: "StsService",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofCredentialSpecifierPresent {
+ err := GrpcService_GoogleGrpc_CallCredentialsValidationError{
+ field: "CredentialSpecifier",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return GrpcService_GoogleGrpc_CallCredentialsMultiError(errors)
+ }
+
+ return nil
+}
+
+// GrpcService_GoogleGrpc_CallCredentialsMultiError is an error wrapping
+// multiple validation errors returned by
+// GrpcService_GoogleGrpc_CallCredentials.ValidateAll() if the designated
+// constraints aren't met.
+type GrpcService_GoogleGrpc_CallCredentialsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m GrpcService_GoogleGrpc_CallCredentialsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m GrpcService_GoogleGrpc_CallCredentialsMultiError) AllErrors() []error { return m }
+
+// GrpcService_GoogleGrpc_CallCredentialsValidationError is the validation
+// error returned by GrpcService_GoogleGrpc_CallCredentials.Validate if the
+// designated constraints aren't met.
+type GrpcService_GoogleGrpc_CallCredentialsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e GrpcService_GoogleGrpc_CallCredentialsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e GrpcService_GoogleGrpc_CallCredentialsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e GrpcService_GoogleGrpc_CallCredentialsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e GrpcService_GoogleGrpc_CallCredentialsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e GrpcService_GoogleGrpc_CallCredentialsValidationError) ErrorName() string {
+ return "GrpcService_GoogleGrpc_CallCredentialsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e GrpcService_GoogleGrpc_CallCredentialsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sGrpcService_GoogleGrpc_CallCredentials.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = GrpcService_GoogleGrpc_CallCredentialsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = GrpcService_GoogleGrpc_CallCredentialsValidationError{}
+
+// Validate checks the field values on GrpcService_GoogleGrpc_ChannelArgs with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the first error encountered is returned, or nil if there are
+// no violations.
+func (m *GrpcService_GoogleGrpc_ChannelArgs) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on GrpcService_GoogleGrpc_ChannelArgs
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, the result is a list of violation errors wrapped in
+// GrpcService_GoogleGrpc_ChannelArgsMultiError, or nil if none found.
+func (m *GrpcService_GoogleGrpc_ChannelArgs) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *GrpcService_GoogleGrpc_ChannelArgs) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ {
+ sorted_keys := make([]string, len(m.GetArgs()))
+ i := 0
+ for key := range m.GetArgs() {
+ sorted_keys[i] = key
+ i++
+ }
+ sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] })
+ for _, key := range sorted_keys {
+ val := m.GetArgs()[key]
+ _ = val
+
+ // no validation rules for Args[key]
+
+ if all {
+ switch v := interface{}(val).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, GrpcService_GoogleGrpc_ChannelArgsValidationError{
+ field: fmt.Sprintf("Args[%v]", key),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, GrpcService_GoogleGrpc_ChannelArgsValidationError{
+ field: fmt.Sprintf("Args[%v]", key),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(val).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcService_GoogleGrpc_ChannelArgsValidationError{
+ field: fmt.Sprintf("Args[%v]", key),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+ }
+
+ if len(errors) > 0 {
+ return GrpcService_GoogleGrpc_ChannelArgsMultiError(errors)
+ }
+
+ return nil
+}
+
+// GrpcService_GoogleGrpc_ChannelArgsMultiError is an error wrapping multiple
+// validation errors returned by
+// GrpcService_GoogleGrpc_ChannelArgs.ValidateAll() if the designated
+// constraints aren't met.
+type GrpcService_GoogleGrpc_ChannelArgsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m GrpcService_GoogleGrpc_ChannelArgsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m GrpcService_GoogleGrpc_ChannelArgsMultiError) AllErrors() []error { return m }
+
+// GrpcService_GoogleGrpc_ChannelArgsValidationError is the validation error
+// returned by GrpcService_GoogleGrpc_ChannelArgs.Validate if the designated
+// constraints aren't met.
+type GrpcService_GoogleGrpc_ChannelArgsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e GrpcService_GoogleGrpc_ChannelArgsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e GrpcService_GoogleGrpc_ChannelArgsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e GrpcService_GoogleGrpc_ChannelArgsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e GrpcService_GoogleGrpc_ChannelArgsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e GrpcService_GoogleGrpc_ChannelArgsValidationError) ErrorName() string {
+ return "GrpcService_GoogleGrpc_ChannelArgsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e GrpcService_GoogleGrpc_ChannelArgsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sGrpcService_GoogleGrpc_ChannelArgs.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = GrpcService_GoogleGrpc_ChannelArgsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = GrpcService_GoogleGrpc_ChannelArgsValidationError{}
+
+// Validate checks the field values on
+// GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, the first error encountered is returned, or nil if
+// there are no violations.
+func (m *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on
+// GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, the result is a list of violation errors wrapped in
+// GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsMultiError,
+// or nil if none found.
+func (m *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for JsonKey
+
+ // no validation rules for TokenLifetimeSeconds
+
+ if len(errors) > 0 {
+ return GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsMultiError(errors)
+ }
+
+ return nil
+}
+
+// GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsMultiError
+// is an error wrapping multiple validation errors returned by
+// GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials.ValidateAll()
+// if the designated constraints aren't met.
+type GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsMultiError) AllErrors() []error {
+ return m
+}
+
+// GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError
+// is the validation error returned by
+// GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials.Validate
+// if the designated constraints aren't met.
+type GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError) Field() string {
+ return e.field
+}
+
+// Reason function returns reason value.
+func (e GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError) Reason() string {
+ return e.reason
+}
+
+// Cause function returns cause value.
+func (e GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError) Cause() error {
+ return e.cause
+}
+
+// Key function returns key value.
+func (e GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError) Key() bool {
+ return e.key
+}
+
+// ErrorName returns error name.
+func (e GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError) ErrorName() string {
+ return "GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sGrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentials.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = GrpcService_GoogleGrpc_CallCredentials_ServiceAccountJWTAccessCredentialsValidationError{}
+
+// Validate checks the field values on
+// GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on
+// GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsMultiError, or
+// nil if none found.
+func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for AuthorizationToken
+
+ // no validation rules for AuthoritySelector
+
+ if len(errors) > 0 {
+ return GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsMultiError(errors)
+ }
+
+ return nil
+}
+
+// GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsMultiError is an
+// error wrapping multiple validation errors returned by
+// GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials.ValidateAll()
+// if the designated constraints aren't met.
+type GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsMultiError) AllErrors() []error {
+ return m
+}
+
+// GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError
+// is the validation error returned by
+// GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials.Validate if the
+// designated constraints aren't met.
+type GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError) Field() string {
+ return e.field
+}
+
+// Reason function returns reason value.
+func (e GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError) Reason() string {
+ return e.reason
+}
+
+// Cause function returns cause value.
+func (e GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError) Cause() error {
+ return e.cause
+}
+
+// Key function returns key value.
+func (e GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError) Key() bool {
+ return e.key
+}
+
+// ErrorName returns error name.
+func (e GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError) ErrorName() string {
+ return "GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sGrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentials.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = GrpcService_GoogleGrpc_CallCredentials_GoogleIAMCredentialsValidationError{}
+
+// Validate checks the field values on
+// GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the first error encountered is returned, or nil if there are
+// no violations.
+func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on
+// GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the result is a list of violation errors wrapped in
+// GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginMultiError,
+// or nil if none found.
+func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Name
+
+ switch v := m.ConfigType.(type) {
+ case *GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin_TypedConfig:
+ if v == nil {
+ err := GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError{
+ field: "ConfigType",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetTypedConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError{
+ field: "TypedConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError{
+ field: "TypedConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError{
+ field: "TypedConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+
+ if len(errors) > 0 {
+ return GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginMultiError(errors)
+ }
+
+ return nil
+}
+
+// GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginMultiError
+// is an error wrapping multiple validation errors returned by
+// GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin.ValidateAll()
+// if the designated constraints aren't met.
+type GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginMultiError) AllErrors() []error {
+ return m
+}
+
+// GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError
+// is the validation error returned by
+// GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin.Validate
+// if the designated constraints aren't met.
+type GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError) Field() string {
+ return e.field
+}
+
+// Reason function returns reason value.
+func (e GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError) Reason() string {
+ return e.reason
+}
+
+// Cause function returns cause value.
+func (e GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError) Cause() error {
+ return e.cause
+}
+
+// Key function returns key value.
+func (e GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError) Key() bool {
+ return e.key
+}
+
+// ErrorName returns error name.
+func (e GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError) ErrorName() string {
+ return "GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sGrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPlugin.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = GrpcService_GoogleGrpc_CallCredentials_MetadataCredentialsFromPluginValidationError{}
+
+// Validate checks the field values on
+// GrpcService_GoogleGrpc_CallCredentials_StsService with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *GrpcService_GoogleGrpc_CallCredentials_StsService) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on
+// GrpcService_GoogleGrpc_CallCredentials_StsService with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in
+// GrpcService_GoogleGrpc_CallCredentials_StsServiceMultiError, or nil if none found.
+func (m *GrpcService_GoogleGrpc_CallCredentials_StsService) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *GrpcService_GoogleGrpc_CallCredentials_StsService) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for TokenExchangeServiceUri
+
+ // no validation rules for Resource
+
+ // no validation rules for Audience
+
+ // no validation rules for Scope
+
+ // no validation rules for RequestedTokenType
+
+ if utf8.RuneCountInString(m.GetSubjectTokenPath()) < 1 {
+ err := GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError{
+ field: "SubjectTokenPath",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if utf8.RuneCountInString(m.GetSubjectTokenType()) < 1 {
+ err := GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError{
+ field: "SubjectTokenType",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ // no validation rules for ActorTokenPath
+
+ // no validation rules for ActorTokenType
+
+ if len(errors) > 0 {
+ return GrpcService_GoogleGrpc_CallCredentials_StsServiceMultiError(errors)
+ }
+
+ return nil
+}
+
+// GrpcService_GoogleGrpc_CallCredentials_StsServiceMultiError is an error
+// wrapping multiple validation errors returned by
+// GrpcService_GoogleGrpc_CallCredentials_StsService.ValidateAll() if the
+// designated constraints aren't met.
+type GrpcService_GoogleGrpc_CallCredentials_StsServiceMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m GrpcService_GoogleGrpc_CallCredentials_StsServiceMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m GrpcService_GoogleGrpc_CallCredentials_StsServiceMultiError) AllErrors() []error { return m }
+
+// GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError is the
+// validation error returned by
+// GrpcService_GoogleGrpc_CallCredentials_StsService.Validate if the
+// designated constraints aren't met.
+type GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError) Field() string {
+ return e.field
+}
+
+// Reason function returns reason value.
+func (e GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError) Reason() string {
+ return e.reason
+}
+
+// Cause function returns cause value.
+func (e GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError) Cause() error {
+ return e.cause
+}
+
+// Key function returns key value.
+func (e GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError) ErrorName() string {
+ return "GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sGrpcService_GoogleGrpc_CallCredentials_StsService.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = GrpcService_GoogleGrpc_CallCredentials_StsServiceValidationError{}
+
+// Validate checks the field values on GrpcService_GoogleGrpc_ChannelArgs_Value
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, the first error encountered is returned, or nil if
+// there are no violations.
+func (m *GrpcService_GoogleGrpc_ChannelArgs_Value) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on
+// GrpcService_GoogleGrpc_ChannelArgs_Value with the rules defined in the
+// proto definition for this message. If any rules are violated, the result is
+// a list of violation errors wrapped in
+// GrpcService_GoogleGrpc_ChannelArgs_ValueMultiError, or nil if none found.
+func (m *GrpcService_GoogleGrpc_ChannelArgs_Value) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *GrpcService_GoogleGrpc_ChannelArgs_Value) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ oneofValueSpecifierPresent := false
+ switch v := m.ValueSpecifier.(type) {
+ case *GrpcService_GoogleGrpc_ChannelArgs_Value_StringValue:
+ if v == nil {
+ err := GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError{
+ field: "ValueSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofValueSpecifierPresent = true
+ // no validation rules for StringValue
+ case *GrpcService_GoogleGrpc_ChannelArgs_Value_IntValue:
+ if v == nil {
+ err := GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError{
+ field: "ValueSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofValueSpecifierPresent = true
+ // no validation rules for IntValue
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofValueSpecifierPresent {
+ err := GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError{
+ field: "ValueSpecifier",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return GrpcService_GoogleGrpc_ChannelArgs_ValueMultiError(errors)
+ }
+
+ return nil
+}
+
+// GrpcService_GoogleGrpc_ChannelArgs_ValueMultiError is an error wrapping
+// multiple validation errors returned by
+// GrpcService_GoogleGrpc_ChannelArgs_Value.ValidateAll() if the designated
+// constraints aren't met.
+type GrpcService_GoogleGrpc_ChannelArgs_ValueMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m GrpcService_GoogleGrpc_ChannelArgs_ValueMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m GrpcService_GoogleGrpc_ChannelArgs_ValueMultiError) AllErrors() []error { return m }
+
+// GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError is the validation
+// error returned by GrpcService_GoogleGrpc_ChannelArgs_Value.Validate if the
+// designated constraints aren't met.
+type GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError) ErrorName() string {
+ return "GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sGrpcService_GoogleGrpc_ChannelArgs_Value.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = GrpcService_GoogleGrpc_ChannelArgs_ValueValidationError{}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/health_check.pb.go b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/health_check.pb.go
new file mode 100644
index 000000000..007697146
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/health_check.pb.go
@@ -0,0 +1,1670 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.23.0
+// protoc v4.23.1
+// source: envoy/config/core/v3/health_check.proto
+
+package corev3
+
+import (
+ _ "github.com/cilium/proxy/go/envoy/annotations"
+ v31 "github.com/cilium/proxy/go/envoy/type/matcher/v3"
+ v3 "github.com/cilium/proxy/go/envoy/type/v3"
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// Endpoint health status.
+type HealthStatus int32
+
+const (
+ // The health status is not known. This is interpreted by Envoy as “HEALTHY“.
+ HealthStatus_UNKNOWN HealthStatus = 0
+ // Healthy.
+ HealthStatus_HEALTHY HealthStatus = 1
+ // Unhealthy.
+ HealthStatus_UNHEALTHY HealthStatus = 2
+ // Connection draining in progress. E.g.,
+ // ``_
+ // or
+ // ``_.
+ // This is interpreted by Envoy as “UNHEALTHY“.
+ HealthStatus_DRAINING HealthStatus = 3
+ // Health check timed out. This is part of HDS and is interpreted by Envoy as
+ // “UNHEALTHY“.
+ HealthStatus_TIMEOUT HealthStatus = 4
+ // Degraded.
+ HealthStatus_DEGRADED HealthStatus = 5
+)
+
+// Enum value maps for HealthStatus.
+var (
+ HealthStatus_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "HEALTHY",
+ 2: "UNHEALTHY",
+ 3: "DRAINING",
+ 4: "TIMEOUT",
+ 5: "DEGRADED",
+ }
+ HealthStatus_value = map[string]int32{
+ "UNKNOWN": 0,
+ "HEALTHY": 1,
+ "UNHEALTHY": 2,
+ "DRAINING": 3,
+ "TIMEOUT": 4,
+ "DEGRADED": 5,
+ }
+)
+
+func (x HealthStatus) Enum() *HealthStatus {
+ p := new(HealthStatus)
+ *p = x
+ return p
+}
+
+func (x HealthStatus) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (HealthStatus) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_core_v3_health_check_proto_enumTypes[0].Descriptor()
+}
+
+func (HealthStatus) Type() protoreflect.EnumType {
+ return &file_envoy_config_core_v3_health_check_proto_enumTypes[0]
+}
+
+func (x HealthStatus) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use HealthStatus.Descriptor instead.
+func (HealthStatus) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{0}
+}
+
+type HealthStatusSet struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // An order-independent set of health status.
+ Statuses []HealthStatus `protobuf:"varint,1,rep,packed,name=statuses,proto3,enum=envoy.config.core.v3.HealthStatus" json:"statuses,omitempty"`
+}
+
+func (x *HealthStatusSet) Reset() {
+ *x = HealthStatusSet{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HealthStatusSet) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthStatusSet) ProtoMessage() {}
+
+func (x *HealthStatusSet) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthStatusSet.ProtoReflect.Descriptor instead.
+func (*HealthStatusSet) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *HealthStatusSet) GetStatuses() []HealthStatus {
+ if x != nil {
+ return x.Statuses
+ }
+ return nil
+}
+
+// [#next-free-field: 26]
+type HealthCheck struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The time to wait for a health check response. If the timeout is reached the
+ // health check attempt will be considered a failure.
+ Timeout *durationpb.Duration `protobuf:"bytes,1,opt,name=timeout,proto3" json:"timeout,omitempty"`
+ // The interval between health checks.
+ Interval *durationpb.Duration `protobuf:"bytes,2,opt,name=interval,proto3" json:"interval,omitempty"`
+ // An optional jitter amount in milliseconds. If specified, Envoy will start health
+ // checking after for a random time in ms between 0 and initial_jitter. This only
+ // applies to the first health check.
+ InitialJitter *durationpb.Duration `protobuf:"bytes,20,opt,name=initial_jitter,json=initialJitter,proto3" json:"initial_jitter,omitempty"`
+ // An optional jitter amount in milliseconds. If specified, during every
+ // interval Envoy will add interval_jitter to the wait time.
+ IntervalJitter *durationpb.Duration `protobuf:"bytes,3,opt,name=interval_jitter,json=intervalJitter,proto3" json:"interval_jitter,omitempty"`
+ // An optional jitter amount as a percentage of interval_ms. If specified,
+ // during every interval Envoy will add “interval_ms“ *
+ // “interval_jitter_percent“ / 100 to the wait time.
+ //
+ // If interval_jitter_ms and interval_jitter_percent are both set, both of
+ // them will be used to increase the wait time.
+ IntervalJitterPercent uint32 `protobuf:"varint,18,opt,name=interval_jitter_percent,json=intervalJitterPercent,proto3" json:"interval_jitter_percent,omitempty"`
+ // The number of unhealthy health checks required before a host is marked
+ // unhealthy. Note that for “http“ health checking if a host responds with a code not in
+ // :ref:`expected_statuses `
+ // or :ref:`retriable_statuses `,
+ // this threshold is ignored and the host is considered immediately unhealthy.
+ UnhealthyThreshold *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=unhealthy_threshold,json=unhealthyThreshold,proto3" json:"unhealthy_threshold,omitempty"`
+ // The number of healthy health checks required before a host is marked
+ // healthy. Note that during startup, only a single successful health check is
+ // required to mark a host healthy.
+ HealthyThreshold *wrapperspb.UInt32Value `protobuf:"bytes,5,opt,name=healthy_threshold,json=healthyThreshold,proto3" json:"healthy_threshold,omitempty"`
+ // [#not-implemented-hide:] Non-serving port for health checking.
+ AltPort *wrapperspb.UInt32Value `protobuf:"bytes,6,opt,name=alt_port,json=altPort,proto3" json:"alt_port,omitempty"`
+ // Reuse health check connection between health checks. Default is true.
+ ReuseConnection *wrapperspb.BoolValue `protobuf:"bytes,7,opt,name=reuse_connection,json=reuseConnection,proto3" json:"reuse_connection,omitempty"`
+ // Types that are assignable to HealthChecker:
+ //
+ // *HealthCheck_HttpHealthCheck_
+ // *HealthCheck_TcpHealthCheck_
+ // *HealthCheck_GrpcHealthCheck_
+ // *HealthCheck_CustomHealthCheck_
+ HealthChecker isHealthCheck_HealthChecker `protobuf_oneof:"health_checker"`
+ // The "no traffic interval" is a special health check interval that is used when a cluster has
+ // never had traffic routed to it. This lower interval allows cluster information to be kept up to
+ // date, without sending a potentially large amount of active health checking traffic for no
+ // reason. Once a cluster has been used for traffic routing, Envoy will shift back to using the
+ // standard health check interval that is defined. Note that this interval takes precedence over
+ // any other.
+ //
+ // The default value for "no traffic interval" is 60 seconds.
+ NoTrafficInterval *durationpb.Duration `protobuf:"bytes,12,opt,name=no_traffic_interval,json=noTrafficInterval,proto3" json:"no_traffic_interval,omitempty"`
+ // The "no traffic healthy interval" is a special health check interval that
+ // is used for hosts that are currently passing active health checking
+ // (including new hosts) when the cluster has received no traffic.
+ //
+ // This is useful for when we want to send frequent health checks with
+ // “no_traffic_interval“ but then revert to lower frequency “no_traffic_healthy_interval“ once
+ // a host in the cluster is marked as healthy.
+ //
+ // Once a cluster has been used for traffic routing, Envoy will shift back to using the
+ // standard health check interval that is defined.
+ //
+ // If no_traffic_healthy_interval is not set, it will default to the
+ // no traffic interval and send that interval regardless of health state.
+ NoTrafficHealthyInterval *durationpb.Duration `protobuf:"bytes,24,opt,name=no_traffic_healthy_interval,json=noTrafficHealthyInterval,proto3" json:"no_traffic_healthy_interval,omitempty"`
+ // The "unhealthy interval" is a health check interval that is used for hosts that are marked as
+ // unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the
+ // standard health check interval that is defined.
+ //
+ // The default value for "unhealthy interval" is the same as "interval".
+ UnhealthyInterval *durationpb.Duration `protobuf:"bytes,14,opt,name=unhealthy_interval,json=unhealthyInterval,proto3" json:"unhealthy_interval,omitempty"`
+ // The "unhealthy edge interval" is a special health check interval that is used for the first
+ // health check right after a host is marked as unhealthy. For subsequent health checks
+ // Envoy will shift back to using either "unhealthy interval" if present or the standard health
+ // check interval that is defined.
+ //
+ // The default value for "unhealthy edge interval" is the same as "unhealthy interval".
+ UnhealthyEdgeInterval *durationpb.Duration `protobuf:"bytes,15,opt,name=unhealthy_edge_interval,json=unhealthyEdgeInterval,proto3" json:"unhealthy_edge_interval,omitempty"`
+ // The "healthy edge interval" is a special health check interval that is used for the first
+ // health check right after a host is marked as healthy. For subsequent health checks
+ // Envoy will shift back to using the standard health check interval that is defined.
+ //
+ // The default value for "healthy edge interval" is the same as the default interval.
+ HealthyEdgeInterval *durationpb.Duration `protobuf:"bytes,16,opt,name=healthy_edge_interval,json=healthyEdgeInterval,proto3" json:"healthy_edge_interval,omitempty"`
+ // .. attention::
+ // This field is deprecated in favor of the extension
+ // :ref:`event_logger ` and
+ // :ref:`event_log_path `
+ // in the file sink extension.
+ //
+ // Specifies the path to the :ref:`health check event log `.
+ //
+ // Deprecated: Do not use.
+ EventLogPath string `protobuf:"bytes,17,opt,name=event_log_path,json=eventLogPath,proto3" json:"event_log_path,omitempty"`
+ // A list of event log sinks to process the health check event.
+ // [#extension-category: envoy.health_check.event_sinks]
+ EventLogger []*TypedExtensionConfig `protobuf:"bytes,25,rep,name=event_logger,json=eventLogger,proto3" json:"event_logger,omitempty"`
+ // [#not-implemented-hide:]
+ // The gRPC service for the health check event service.
+ // If empty, health check events won't be sent to a remote endpoint.
+ EventService *EventServiceConfig `protobuf:"bytes,22,opt,name=event_service,json=eventService,proto3" json:"event_service,omitempty"`
+ // If set to true, health check failure events will always be logged. If set to false, only the
+ // initial health check failure event will be logged.
+ // The default value is false.
+ AlwaysLogHealthCheckFailures bool `protobuf:"varint,19,opt,name=always_log_health_check_failures,json=alwaysLogHealthCheckFailures,proto3" json:"always_log_health_check_failures,omitempty"`
+ // This allows overriding the cluster TLS settings, just for health check connections.
+ TlsOptions *HealthCheck_TlsOptions `protobuf:"bytes,21,opt,name=tls_options,json=tlsOptions,proto3" json:"tls_options,omitempty"`
+ // Optional key/value pairs that will be used to match a transport socket from those specified in the cluster's
+ // :ref:`tranport socket matches `.
+ // For example, the following match criteria
+ //
+ // .. code-block:: yaml
+ //
+ // transport_socket_match_criteria:
+ // useMTLS: true
+ //
+ // Will match the following :ref:`cluster socket match `
+ //
+ // .. code-block:: yaml
+ //
+ // transport_socket_matches:
+ // - name: "useMTLS"
+ // match:
+ // useMTLS: true
+ // transport_socket:
+ // name: envoy.transport_sockets.tls
+ // config: { ... } # tls socket configuration
+ //
+ // If this field is set, then for health checks it will supersede an entry of “envoy.transport_socket“ in the
+ // :ref:`LbEndpoint.Metadata `.
+ // This allows using different transport socket capabilities for health checking versus proxying to the
+ // endpoint.
+ //
+ // If the key/values pairs specified do not match any
+ // :ref:`transport socket matches `,
+ // the cluster's :ref:`transport socket `
+ // will be used for health check socket configuration.
+ TransportSocketMatchCriteria *structpb.Struct `protobuf:"bytes,23,opt,name=transport_socket_match_criteria,json=transportSocketMatchCriteria,proto3" json:"transport_socket_match_criteria,omitempty"`
+}
+
+func (x *HealthCheck) Reset() {
+ *x = HealthCheck{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HealthCheck) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthCheck) ProtoMessage() {}
+
+func (x *HealthCheck) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthCheck.ProtoReflect.Descriptor instead.
+func (*HealthCheck) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *HealthCheck) GetTimeout() *durationpb.Duration {
+ if x != nil {
+ return x.Timeout
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetInterval() *durationpb.Duration {
+ if x != nil {
+ return x.Interval
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetInitialJitter() *durationpb.Duration {
+ if x != nil {
+ return x.InitialJitter
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetIntervalJitter() *durationpb.Duration {
+ if x != nil {
+ return x.IntervalJitter
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetIntervalJitterPercent() uint32 {
+ if x != nil {
+ return x.IntervalJitterPercent
+ }
+ return 0
+}
+
+func (x *HealthCheck) GetUnhealthyThreshold() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.UnhealthyThreshold
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetHealthyThreshold() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.HealthyThreshold
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetAltPort() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.AltPort
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetReuseConnection() *wrapperspb.BoolValue {
+ if x != nil {
+ return x.ReuseConnection
+ }
+ return nil
+}
+
+func (m *HealthCheck) GetHealthChecker() isHealthCheck_HealthChecker {
+ if m != nil {
+ return m.HealthChecker
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetHttpHealthCheck() *HealthCheck_HttpHealthCheck {
+ if x, ok := x.GetHealthChecker().(*HealthCheck_HttpHealthCheck_); ok {
+ return x.HttpHealthCheck
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetTcpHealthCheck() *HealthCheck_TcpHealthCheck {
+ if x, ok := x.GetHealthChecker().(*HealthCheck_TcpHealthCheck_); ok {
+ return x.TcpHealthCheck
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetGrpcHealthCheck() *HealthCheck_GrpcHealthCheck {
+ if x, ok := x.GetHealthChecker().(*HealthCheck_GrpcHealthCheck_); ok {
+ return x.GrpcHealthCheck
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetCustomHealthCheck() *HealthCheck_CustomHealthCheck {
+ if x, ok := x.GetHealthChecker().(*HealthCheck_CustomHealthCheck_); ok {
+ return x.CustomHealthCheck
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetNoTrafficInterval() *durationpb.Duration {
+ if x != nil {
+ return x.NoTrafficInterval
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetNoTrafficHealthyInterval() *durationpb.Duration {
+ if x != nil {
+ return x.NoTrafficHealthyInterval
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetUnhealthyInterval() *durationpb.Duration {
+ if x != nil {
+ return x.UnhealthyInterval
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetUnhealthyEdgeInterval() *durationpb.Duration {
+ if x != nil {
+ return x.UnhealthyEdgeInterval
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetHealthyEdgeInterval() *durationpb.Duration {
+ if x != nil {
+ return x.HealthyEdgeInterval
+ }
+ return nil
+}
+
+// Deprecated: Do not use.
+func (x *HealthCheck) GetEventLogPath() string {
+ if x != nil {
+ return x.EventLogPath
+ }
+ return ""
+}
+
+func (x *HealthCheck) GetEventLogger() []*TypedExtensionConfig {
+ if x != nil {
+ return x.EventLogger
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetEventService() *EventServiceConfig {
+ if x != nil {
+ return x.EventService
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetAlwaysLogHealthCheckFailures() bool {
+ if x != nil {
+ return x.AlwaysLogHealthCheckFailures
+ }
+ return false
+}
+
+func (x *HealthCheck) GetTlsOptions() *HealthCheck_TlsOptions {
+ if x != nil {
+ return x.TlsOptions
+ }
+ return nil
+}
+
+func (x *HealthCheck) GetTransportSocketMatchCriteria() *structpb.Struct {
+ if x != nil {
+ return x.TransportSocketMatchCriteria
+ }
+ return nil
+}
+
+type isHealthCheck_HealthChecker interface {
+ isHealthCheck_HealthChecker()
+}
+
+type HealthCheck_HttpHealthCheck_ struct {
+ // HTTP health check.
+ HttpHealthCheck *HealthCheck_HttpHealthCheck `protobuf:"bytes,8,opt,name=http_health_check,json=httpHealthCheck,proto3,oneof"`
+}
+
+type HealthCheck_TcpHealthCheck_ struct {
+ // TCP health check.
+ TcpHealthCheck *HealthCheck_TcpHealthCheck `protobuf:"bytes,9,opt,name=tcp_health_check,json=tcpHealthCheck,proto3,oneof"`
+}
+
+type HealthCheck_GrpcHealthCheck_ struct {
+ // gRPC health check.
+ GrpcHealthCheck *HealthCheck_GrpcHealthCheck `protobuf:"bytes,11,opt,name=grpc_health_check,json=grpcHealthCheck,proto3,oneof"`
+}
+
+type HealthCheck_CustomHealthCheck_ struct {
+ // Custom health check.
+ CustomHealthCheck *HealthCheck_CustomHealthCheck `protobuf:"bytes,13,opt,name=custom_health_check,json=customHealthCheck,proto3,oneof"`
+}
+
+func (*HealthCheck_HttpHealthCheck_) isHealthCheck_HealthChecker() {}
+
+func (*HealthCheck_TcpHealthCheck_) isHealthCheck_HealthChecker() {}
+
+func (*HealthCheck_GrpcHealthCheck_) isHealthCheck_HealthChecker() {}
+
+func (*HealthCheck_CustomHealthCheck_) isHealthCheck_HealthChecker() {}
+
+// Describes the encoding of the payload bytes in the payload.
+type HealthCheck_Payload struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Payload:
+ //
+ // *HealthCheck_Payload_Text
+ // *HealthCheck_Payload_Binary
+ Payload isHealthCheck_Payload_Payload `protobuf_oneof:"payload"`
+}
+
+func (x *HealthCheck_Payload) Reset() {
+ *x = HealthCheck_Payload{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HealthCheck_Payload) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthCheck_Payload) ProtoMessage() {}
+
+func (x *HealthCheck_Payload) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthCheck_Payload.ProtoReflect.Descriptor instead.
+func (*HealthCheck_Payload) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{1, 0}
+}
+
+func (m *HealthCheck_Payload) GetPayload() isHealthCheck_Payload_Payload {
+ if m != nil {
+ return m.Payload
+ }
+ return nil
+}
+
+func (x *HealthCheck_Payload) GetText() string {
+ if x, ok := x.GetPayload().(*HealthCheck_Payload_Text); ok {
+ return x.Text
+ }
+ return ""
+}
+
+func (x *HealthCheck_Payload) GetBinary() []byte {
+ if x, ok := x.GetPayload().(*HealthCheck_Payload_Binary); ok {
+ return x.Binary
+ }
+ return nil
+}
+
+type isHealthCheck_Payload_Payload interface {
+ isHealthCheck_Payload_Payload()
+}
+
+type HealthCheck_Payload_Text struct {
+ // Hex encoded payload. E.g., "000000FF".
+ Text string `protobuf:"bytes,1,opt,name=text,proto3,oneof"`
+}
+
+type HealthCheck_Payload_Binary struct {
+ // Binary payload.
+ Binary []byte `protobuf:"bytes,2,opt,name=binary,proto3,oneof"`
+}
+
+func (*HealthCheck_Payload_Text) isHealthCheck_Payload_Payload() {}
+
+func (*HealthCheck_Payload_Binary) isHealthCheck_Payload_Payload() {}
+
+// [#next-free-field: 15]
+type HealthCheck_HttpHealthCheck struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The value of the host header in the HTTP health check request. If
+ // left empty (default value), the name of the cluster this health check is associated
+ // with will be used. The host header can be customized for a specific endpoint by setting the
+ // :ref:`hostname ` field.
+ Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"`
+ // Specifies the HTTP path that will be requested during health checking. For example
+ // “/healthcheck“.
+ Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"`
+ // [#not-implemented-hide:] HTTP specific payload.
+ Send *HealthCheck_Payload `protobuf:"bytes,3,opt,name=send,proto3" json:"send,omitempty"`
+ // Specifies a list of HTTP expected responses to match in the first “response_buffer_size“ bytes of the response body.
+ // If it is set, both the expected response check and status code determine the health check.
+ // When checking the response, “fuzzy” matching is performed such that each payload block must be found,
+ // and in the order specified, but not necessarily contiguous.
+ //
+ // .. note::
+ //
+ // It is recommended to set ``response_buffer_size`` based on the total Payload size for efficiency.
+ // The default buffer size is 1024 bytes when it is not set.
+ Receive []*HealthCheck_Payload `protobuf:"bytes,4,rep,name=receive,proto3" json:"receive,omitempty"`
+ // Specifies the size of response buffer in bytes that is used to Payload match.
+ // The default value is 1024. Setting to 0 implies that the Payload will be matched against the entire response.
+ ResponseBufferSize *wrapperspb.UInt64Value `protobuf:"bytes,14,opt,name=response_buffer_size,json=responseBufferSize,proto3" json:"response_buffer_size,omitempty"`
+ // Specifies a list of HTTP headers that should be added to each request that is sent to the
+ // health checked cluster. For more information, including details on header value syntax, see
+ // the documentation on :ref:`custom request headers
+ // `.
+ RequestHeadersToAdd []*HeaderValueOption `protobuf:"bytes,6,rep,name=request_headers_to_add,json=requestHeadersToAdd,proto3" json:"request_headers_to_add,omitempty"`
+ // Specifies a list of HTTP headers that should be removed from each request that is sent to the
+ // health checked cluster.
+ RequestHeadersToRemove []string `protobuf:"bytes,8,rep,name=request_headers_to_remove,json=requestHeadersToRemove,proto3" json:"request_headers_to_remove,omitempty"`
+ // Specifies a list of HTTP response statuses considered healthy. If provided, replaces default
+ // 200-only policy - 200 must be included explicitly as needed. Ranges follow half-open
+ // semantics of :ref:`Int64Range `. The start and end of each
+ // range are required. Only statuses in the range [100, 600) are allowed.
+ ExpectedStatuses []*v3.Int64Range `protobuf:"bytes,9,rep,name=expected_statuses,json=expectedStatuses,proto3" json:"expected_statuses,omitempty"`
+ // Specifies a list of HTTP response statuses considered retriable. If provided, responses in this range
+ // will count towards the configured :ref:`unhealthy_threshold `,
+ // but will not result in the host being considered immediately unhealthy. Ranges follow half-open semantics of
+ // :ref:`Int64Range `. The start and end of each range are required.
+ // Only statuses in the range [100, 600) are allowed. The :ref:`expected_statuses `
+ // field takes precedence for any range overlaps with this field i.e. if status code 200 is both retriable and expected, a 200 response will
+ // be considered a successful health check. By default all responses not in
+ // :ref:`expected_statuses ` will result in
+ // the host being considered immediately unhealthy i.e. if status code 200 is expected and there are no configured retriable statuses, any
+ // non-200 response will result in the host being marked unhealthy.
+ RetriableStatuses []*v3.Int64Range `protobuf:"bytes,12,rep,name=retriable_statuses,json=retriableStatuses,proto3" json:"retriable_statuses,omitempty"`
+ // Use specified application protocol for health checks.
+ CodecClientType v3.CodecClientType `protobuf:"varint,10,opt,name=codec_client_type,json=codecClientType,proto3,enum=envoy.type.v3.CodecClientType" json:"codec_client_type,omitempty"`
+ // An optional service name parameter which is used to validate the identity of
+ // the health checked cluster using a :ref:`StringMatcher
+ // `. See the :ref:`architecture overview
+ // ` for more information.
+ ServiceNameMatcher *v31.StringMatcher `protobuf:"bytes,11,opt,name=service_name_matcher,json=serviceNameMatcher,proto3" json:"service_name_matcher,omitempty"`
+ // HTTP Method that will be used for health checking, default is "GET".
+ // GET, HEAD, POST, PUT, DELETE, OPTIONS, TRACE, PATCH methods are supported, but making request body is not supported.
+ // CONNECT method is disallowed because it is not appropriate for health check request.
+ // If a non-200 response is expected by the method, it needs to be set in :ref:`expected_statuses `.
+ Method RequestMethod `protobuf:"varint,13,opt,name=method,proto3,enum=envoy.config.core.v3.RequestMethod" json:"method,omitempty"`
+}
+
+func (x *HealthCheck_HttpHealthCheck) Reset() {
+ *x = HealthCheck_HttpHealthCheck{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HealthCheck_HttpHealthCheck) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthCheck_HttpHealthCheck) ProtoMessage() {}
+
+func (x *HealthCheck_HttpHealthCheck) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthCheck_HttpHealthCheck.ProtoReflect.Descriptor instead.
+func (*HealthCheck_HttpHealthCheck) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{1, 1}
+}
+
+func (x *HealthCheck_HttpHealthCheck) GetHost() string {
+ if x != nil {
+ return x.Host
+ }
+ return ""
+}
+
+func (x *HealthCheck_HttpHealthCheck) GetPath() string {
+ if x != nil {
+ return x.Path
+ }
+ return ""
+}
+
+func (x *HealthCheck_HttpHealthCheck) GetSend() *HealthCheck_Payload {
+ if x != nil {
+ return x.Send
+ }
+ return nil
+}
+
+func (x *HealthCheck_HttpHealthCheck) GetReceive() []*HealthCheck_Payload {
+ if x != nil {
+ return x.Receive
+ }
+ return nil
+}
+
+func (x *HealthCheck_HttpHealthCheck) GetResponseBufferSize() *wrapperspb.UInt64Value {
+ if x != nil {
+ return x.ResponseBufferSize
+ }
+ return nil
+}
+
+func (x *HealthCheck_HttpHealthCheck) GetRequestHeadersToAdd() []*HeaderValueOption {
+ if x != nil {
+ return x.RequestHeadersToAdd
+ }
+ return nil
+}
+
+func (x *HealthCheck_HttpHealthCheck) GetRequestHeadersToRemove() []string {
+ if x != nil {
+ return x.RequestHeadersToRemove
+ }
+ return nil
+}
+
+func (x *HealthCheck_HttpHealthCheck) GetExpectedStatuses() []*v3.Int64Range {
+ if x != nil {
+ return x.ExpectedStatuses
+ }
+ return nil
+}
+
+func (x *HealthCheck_HttpHealthCheck) GetRetriableStatuses() []*v3.Int64Range {
+ if x != nil {
+ return x.RetriableStatuses
+ }
+ return nil
+}
+
+func (x *HealthCheck_HttpHealthCheck) GetCodecClientType() v3.CodecClientType {
+ if x != nil {
+ return x.CodecClientType
+ }
+ return v3.CodecClientType_HTTP1
+}
+
+func (x *HealthCheck_HttpHealthCheck) GetServiceNameMatcher() *v31.StringMatcher {
+ if x != nil {
+ return x.ServiceNameMatcher
+ }
+ return nil
+}
+
+func (x *HealthCheck_HttpHealthCheck) GetMethod() RequestMethod {
+ if x != nil {
+ return x.Method
+ }
+ return RequestMethod_METHOD_UNSPECIFIED
+}
+
+type HealthCheck_TcpHealthCheck struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Empty payloads imply a connect-only health check.
+ Send *HealthCheck_Payload `protobuf:"bytes,1,opt,name=send,proto3" json:"send,omitempty"`
+ // When checking the response, “fuzzy” matching is performed such that each
+ // payload block must be found, and in the order specified, but not
+ // necessarily contiguous.
+ Receive []*HealthCheck_Payload `protobuf:"bytes,2,rep,name=receive,proto3" json:"receive,omitempty"`
+}
+
+func (x *HealthCheck_TcpHealthCheck) Reset() {
+ *x = HealthCheck_TcpHealthCheck{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HealthCheck_TcpHealthCheck) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthCheck_TcpHealthCheck) ProtoMessage() {}
+
+func (x *HealthCheck_TcpHealthCheck) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthCheck_TcpHealthCheck.ProtoReflect.Descriptor instead.
+func (*HealthCheck_TcpHealthCheck) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{1, 2}
+}
+
+func (x *HealthCheck_TcpHealthCheck) GetSend() *HealthCheck_Payload {
+ if x != nil {
+ return x.Send
+ }
+ return nil
+}
+
+func (x *HealthCheck_TcpHealthCheck) GetReceive() []*HealthCheck_Payload {
+ if x != nil {
+ return x.Receive
+ }
+ return nil
+}
+
+// HealthCheck_RedisHealthCheck configures a Redis-based health check.
+// Generated by protoc-gen-go; do not edit by hand.
+type HealthCheck_RedisHealthCheck struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// If set, optionally perform ``EXISTS <key>`` instead of ``PING``. A return value
+	// from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other
+	// than 0 is considered a failure. This allows the user to mark a Redis instance for maintenance
+	// by setting the specified key to any value and waiting for traffic to drain.
+	Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+}
+
+// Reset restores the message to its zero value and, when the unsafe fast
+// path is enabled, re-registers the cached message type info.
+func (x *HealthCheck_RedisHealthCheck) Reset() {
+	*x = HealthCheck_RedisHealthCheck{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[5]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+// String renders the message in the protobuf text format.
+func (x *HealthCheck_RedisHealthCheck) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+// ProtoMessage marks the type as a protobuf message.
+func (*HealthCheck_RedisHealthCheck) ProtoMessage() {}
+
+// ProtoReflect returns the reflection interface for the message, lazily
+// initializing the cached type info on first use.
+func (x *HealthCheck_RedisHealthCheck) ProtoReflect() protoreflect.Message {
+	mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[5]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthCheck_RedisHealthCheck.ProtoReflect.Descriptor instead.
+func (*HealthCheck_RedisHealthCheck) Descriptor() ([]byte, []int) {
+	return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{1, 3}
+}
+
+// GetKey returns the optional EXISTS key. Safe to call on a nil receiver.
+func (x *HealthCheck_RedisHealthCheck) GetKey() string {
+	if x != nil {
+		return x.Key
+	}
+	return ""
+}
+
+// HealthCheck_GrpcHealthCheck configures a health check based on the
+// standard grpc.health.v1.Health service. The upstream doc links were
+// stripped during code generation; see the gRPC health-checking protocol
+// documentation for details.
+// Generated by protoc-gen-go; do not edit by hand.
+type HealthCheck_GrpcHealthCheck struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// An optional service name parameter which will be sent to the gRPC
+	// service in the grpc.health.v1.HealthCheckRequest message. See the
+	// gRPC health-checking overview for more information.
+	ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"`
+	// The value of the :authority header in the gRPC health check request. If
+	// left empty (default value), the name of the cluster this health check is associated
+	// with will be used. The authority header can be customized for a specific endpoint by setting
+	// the :ref:`hostname ` field.
+	Authority string `protobuf:"bytes,2,opt,name=authority,proto3" json:"authority,omitempty"`
+	// Specifies a list of key-value pairs that should be added to the metadata of each GRPC call
+	// that is sent to the health checked cluster. For more information, including details on header value syntax,
+	// see the documentation on :ref:`custom request headers
+	// `.
+	InitialMetadata []*HeaderValueOption `protobuf:"bytes,3,rep,name=initial_metadata,json=initialMetadata,proto3" json:"initial_metadata,omitempty"`
+}
+
+// Reset restores the message to its zero value and, when the unsafe fast
+// path is enabled, re-registers the cached message type info.
+func (x *HealthCheck_GrpcHealthCheck) Reset() {
+	*x = HealthCheck_GrpcHealthCheck{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[6]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+// String renders the message in the protobuf text format.
+func (x *HealthCheck_GrpcHealthCheck) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+// ProtoMessage marks the type as a protobuf message.
+func (*HealthCheck_GrpcHealthCheck) ProtoMessage() {}
+
+// ProtoReflect returns the reflection interface for the message, lazily
+// initializing the cached type info on first use.
+func (x *HealthCheck_GrpcHealthCheck) ProtoReflect() protoreflect.Message {
+	mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[6]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthCheck_GrpcHealthCheck.ProtoReflect.Descriptor instead.
+func (*HealthCheck_GrpcHealthCheck) Descriptor() ([]byte, []int) {
+	return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{1, 4}
+}
+
+// GetServiceName returns the optional gRPC health service name.
+// Safe to call on a nil receiver.
+func (x *HealthCheck_GrpcHealthCheck) GetServiceName() string {
+	if x != nil {
+		return x.ServiceName
+	}
+	return ""
+}
+
+// GetAuthority returns the :authority header override, if any.
+// Safe to call on a nil receiver.
+func (x *HealthCheck_GrpcHealthCheck) GetAuthority() string {
+	if x != nil {
+		return x.Authority
+	}
+	return ""
+}
+
+// GetInitialMetadata returns the extra metadata attached to each health
+// check call. Safe to call on a nil receiver.
+func (x *HealthCheck_GrpcHealthCheck) GetInitialMetadata() []*HeaderValueOption {
+	if x != nil {
+		return x.InitialMetadata
+	}
+	return nil
+}
+
+// Custom health check.
+// Generated by protoc-gen-go; do not edit by hand.
+type HealthCheck_CustomHealthCheck struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The registered name of the custom health checker.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// A custom health checker specific configuration which depends on the custom health checker
+	// being instantiated. See :api:`envoy/config/health_checker` for reference.
+	// [#extension-category: envoy.health_checkers]
+	//
+	// Types that are assignable to ConfigType:
+	//
+	//	*HealthCheck_CustomHealthCheck_TypedConfig
+	ConfigType isHealthCheck_CustomHealthCheck_ConfigType `protobuf_oneof:"config_type"`
+}
+
+// Reset restores the message to its zero value and, when the unsafe fast
+// path is enabled, re-registers the cached message type info.
+func (x *HealthCheck_CustomHealthCheck) Reset() {
+	*x = HealthCheck_CustomHealthCheck{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[7]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+// String renders the message in the protobuf text format.
+func (x *HealthCheck_CustomHealthCheck) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+// ProtoMessage marks the type as a protobuf message.
+func (*HealthCheck_CustomHealthCheck) ProtoMessage() {}
+
+// ProtoReflect returns the reflection interface for the message, lazily
+// initializing the cached type info on first use.
+func (x *HealthCheck_CustomHealthCheck) ProtoReflect() protoreflect.Message {
+	mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[7]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthCheck_CustomHealthCheck.ProtoReflect.Descriptor instead.
+func (*HealthCheck_CustomHealthCheck) Descriptor() ([]byte, []int) {
+	return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{1, 5}
+}
+
+// GetName returns the registered name of the custom health checker.
+// Safe to call on a nil receiver.
+func (x *HealthCheck_CustomHealthCheck) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+// GetConfigType returns the populated config_type oneof wrapper, or nil.
+func (m *HealthCheck_CustomHealthCheck) GetConfigType() isHealthCheck_CustomHealthCheck_ConfigType {
+	if m != nil {
+		return m.ConfigType
+	}
+	return nil
+}
+
+// GetTypedConfig returns the typed_config Any if that oneof case is set,
+// otherwise nil.
+func (x *HealthCheck_CustomHealthCheck) GetTypedConfig() *anypb.Any {
+	if x, ok := x.GetConfigType().(*HealthCheck_CustomHealthCheck_TypedConfig); ok {
+		return x.TypedConfig
+	}
+	return nil
+}
+
+// isHealthCheck_CustomHealthCheck_ConfigType is the sealed interface
+// implemented by every config_type oneof case wrapper.
+type isHealthCheck_CustomHealthCheck_ConfigType interface {
+	isHealthCheck_CustomHealthCheck_ConfigType()
+}
+
+// HealthCheck_CustomHealthCheck_TypedConfig wraps the typed_config case of
+// the config_type oneof.
+type HealthCheck_CustomHealthCheck_TypedConfig struct {
+	TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"`
+}
+
+func (*HealthCheck_CustomHealthCheck_TypedConfig) isHealthCheck_CustomHealthCheck_ConfigType() {}
+
+// Health checks occur over the transport socket specified for the cluster. This implies that if a
+// cluster is using a TLS-enabled transport socket, the health check will also occur over TLS.
+//
+// This allows overriding the cluster TLS settings, just for health check connections.
+// Generated by protoc-gen-go; do not edit by hand.
+type HealthCheck_TlsOptions struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Specifies the ALPN protocols for health check connections. This is useful if the
+	// corresponding upstream is using ALPN-based :ref:`FilterChainMatch
+	// ` along with different protocols for health checks
+	// versus data connections. If empty, no ALPN protocols will be set on health check connections.
+	AlpnProtocols []string `protobuf:"bytes,1,rep,name=alpn_protocols,json=alpnProtocols,proto3" json:"alpn_protocols,omitempty"`
+}
+
+// Reset restores the message to its zero value and, when the unsafe fast
+// path is enabled, re-registers the cached message type info.
+func (x *HealthCheck_TlsOptions) Reset() {
+	*x = HealthCheck_TlsOptions{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[8]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+// String renders the message in the protobuf text format.
+func (x *HealthCheck_TlsOptions) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+// ProtoMessage marks the type as a protobuf message.
+func (*HealthCheck_TlsOptions) ProtoMessage() {}
+
+// ProtoReflect returns the reflection interface for the message, lazily
+// initializing the cached type info on first use.
+func (x *HealthCheck_TlsOptions) ProtoReflect() protoreflect.Message {
+	mi := &file_envoy_config_core_v3_health_check_proto_msgTypes[8]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthCheck_TlsOptions.ProtoReflect.Descriptor instead.
+func (*HealthCheck_TlsOptions) Descriptor() ([]byte, []int) {
+	return file_envoy_config_core_v3_health_check_proto_rawDescGZIP(), []int{1, 6}
+}
+
+// GetAlpnProtocols returns the ALPN protocols for health check connections.
+// Safe to call on a nil receiver.
+func (x *HealthCheck_TlsOptions) GetAlpnProtocols() []string {
+	if x != nil {
+		return x.AlpnProtocols
+	}
+	return nil
+}
+
+// File_envoy_config_core_v3_health_check_proto is the compiled file
+// descriptor for envoy/config/core/v3/health_check.proto, populated at init
+// time by the generated registration code.
+var File_envoy_config_core_v3_health_check_proto protoreflect.FileDescriptor
+
+var file_envoy_config_core_v3_health_check_proto_rawDesc = []byte{
+ 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68,
+ 0x65, 0x63, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a,
+ 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f,
+ 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f,
+ 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
+ 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74,
+ 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73,
+ 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70,
+ 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72,
+ 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70,
+ 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70,
+ 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d,
+ 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75,
+ 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f,
+ 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64,
+ 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x60, 0x0a, 0x0f, 0x48, 0x65, 0x61,
+ 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x53, 0x65, 0x74, 0x12, 0x4d, 0x0a, 0x08,
+ 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x22,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x92, 0x01, 0x07, 0x22, 0x05, 0x82, 0x01, 0x02, 0x10,
+ 0x01, 0x52, 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x22, 0xed, 0x1e, 0x0a, 0x0b,
+ 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x3f, 0x0a, 0x07, 0x74,
+ 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44,
+ 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0xaa, 0x01, 0x04, 0x08,
+ 0x01, 0x2a, 0x00, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x41, 0x0a, 0x08,
+ 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0xaa, 0x01,
+ 0x04, 0x08, 0x01, 0x2a, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12,
+ 0x40, 0x0a, 0x0e, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65,
+ 0x72, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x0d, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x4a, 0x69, 0x74, 0x74, 0x65,
+ 0x72, 0x12, 0x42, 0x0a, 0x0f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x6a, 0x69,
+ 0x74, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x4a,
+ 0x69, 0x74, 0x74, 0x65, 0x72, 0x12, 0x36, 0x0a, 0x17, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61,
+ 0x6c, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74,
+ 0x18, 0x12, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x15, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c,
+ 0x4a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x57, 0x0a,
+ 0x13, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73,
+ 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e,
+ 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02,
+ 0x10, 0x01, 0x52, 0x12, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x54, 0x68, 0x72,
+ 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x53, 0x0a, 0x11, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68,
+ 0x79, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42,
+ 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x10, 0x68, 0x65, 0x61, 0x6c, 0x74,
+ 0x68, 0x79, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x61,
+ 0x6c, 0x74, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x61, 0x6c, 0x74,
+ 0x50, 0x6f, 0x72, 0x74, 0x12, 0x45, 0x0a, 0x10, 0x72, 0x65, 0x75, 0x73, 0x65, 0x5f, 0x63, 0x6f,
+ 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0f, 0x72, 0x65, 0x75, 0x73,
+ 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5f, 0x0a, 0x11, 0x68,
+ 0x74, 0x74, 0x70, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b,
+ 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65,
+ 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65,
+ 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x00, 0x52, 0x0f, 0x68, 0x74, 0x74,
+ 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x5c, 0x0a, 0x10,
+ 0x74, 0x63, 0x70, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b,
+ 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65,
+ 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x54, 0x63, 0x70, 0x48, 0x65, 0x61,
+ 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x00, 0x52, 0x0e, 0x74, 0x63, 0x70, 0x48,
+ 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x5f, 0x0a, 0x11, 0x67, 0x72,
+ 0x70, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18,
+ 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61,
+ 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61,
+ 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x00, 0x52, 0x0f, 0x67, 0x72, 0x70, 0x63,
+ 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x65, 0x0a, 0x13, 0x63,
+ 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65,
+ 0x63, 0x6b, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e,
+ 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x43, 0x75, 0x73, 0x74,
+ 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x00, 0x52,
+ 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65,
+ 0x63, 0x6b, 0x12, 0x53, 0x0a, 0x13, 0x6e, 0x6f, 0x5f, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63,
+ 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa,
+ 0x01, 0x02, 0x2a, 0x00, 0x52, 0x11, 0x6e, 0x6f, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x49,
+ 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x62, 0x0a, 0x1b, 0x6e, 0x6f, 0x5f, 0x74, 0x72,
+ 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x69, 0x6e,
+ 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44,
+ 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a,
+ 0x00, 0x52, 0x18, 0x6e, 0x6f, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x48, 0x65, 0x61, 0x6c,
+ 0x74, 0x68, 0x79, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x52, 0x0a, 0x12, 0x75,
+ 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61,
+ 0x6c, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x11, 0x75, 0x6e,
+ 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12,
+ 0x5b, 0x0a, 0x17, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x65, 0x64, 0x67,
+ 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05,
+ 0xaa, 0x01, 0x02, 0x2a, 0x00, 0x52, 0x15, 0x75, 0x6e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79,
+ 0x45, 0x64, 0x67, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x57, 0x0a, 0x15,
+ 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x5f, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x69, 0x6e, 0x74,
+ 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0xaa, 0x01, 0x02, 0x2a, 0x00,
+ 0x52, 0x13, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x79, 0x45, 0x64, 0x67, 0x65, 0x49, 0x6e, 0x74,
+ 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x31, 0x0a, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x6c,
+ 0x6f, 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0x18,
+ 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e,
+ 0x74, 0x4c, 0x6f, 0x67, 0x50, 0x61, 0x74, 0x68, 0x12, 0x4d, 0x0a, 0x0c, 0x65, 0x76, 0x65, 0x6e,
+ 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x18, 0x19, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e,
+ 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x65, 0x76, 0x65, 0x6e,
+ 0x74, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x4d, 0x0a, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74,
+ 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x53,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x46, 0x0a, 0x20, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73,
+ 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63,
+ 0x6b, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x1c, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x4c, 0x6f, 0x67, 0x48, 0x65, 0x61, 0x6c, 0x74,
+ 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x12, 0x4d,
+ 0x0a, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x15, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74,
+ 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x54, 0x6c, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x52, 0x0a, 0x74, 0x6c, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5e, 0x0a,
+ 0x1f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65,
+ 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x63, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61,
+ 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52,
+ 0x1c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74,
+ 0x4d, 0x61, 0x74, 0x63, 0x68, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x1a, 0x80, 0x01,
+ 0x0a, 0x07, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x1d, 0x0a, 0x04, 0x74, 0x65, 0x78,
+ 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01,
+ 0x48, 0x00, 0x52, 0x04, 0x74, 0x65, 0x78, 0x74, 0x12, 0x18, 0x0a, 0x06, 0x62, 0x69, 0x6e, 0x61,
+ 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x06, 0x62, 0x69, 0x6e, 0x61,
+ 0x72, 0x79, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61,
+ 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64,
+ 0x42, 0x0e, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x03, 0xf8, 0x42, 0x01,
+ 0x1a, 0xcc, 0x07, 0x0a, 0x0f, 0x48, 0x74, 0x74, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43,
+ 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1f, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x02, 0xc8, 0x01, 0x00, 0x52,
+ 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10, 0x01, 0xc0, 0x01, 0x02, 0xc8,
+ 0x01, 0x00, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x3d, 0x0a, 0x04, 0x73, 0x65, 0x6e, 0x64,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65,
+ 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61,
+ 0x64, 0x52, 0x04, 0x73, 0x65, 0x6e, 0x64, 0x12, 0x43, 0x0a, 0x07, 0x72, 0x65, 0x63, 0x65, 0x69,
+ 0x76, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e,
+ 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c,
+ 0x6f, 0x61, 0x64, 0x52, 0x07, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x12, 0x57, 0x0a, 0x14,
+ 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x62, 0x75, 0x66, 0x66, 0x65, 0x72, 0x5f,
+ 0x73, 0x69, 0x7a, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e,
+ 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x32, 0x02, 0x28,
+ 0x00, 0x52, 0x12, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x75, 0x66, 0x66, 0x65,
+ 0x72, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x67, 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18,
+ 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61,
+ 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09,
+ 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4b,
+ 0x0a, 0x19, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72,
+ 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x08, 0x20, 0x03, 0x28,
+ 0x09, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x92, 0x01, 0x0a, 0x22, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x01,
+ 0xc8, 0x01, 0x00, 0x52, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64,
+ 0x65, 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x46, 0x0a, 0x11, 0x65,
+ 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73,
+ 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74,
+ 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67,
+ 0x65, 0x52, 0x10, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x65, 0x73, 0x12, 0x48, 0x0a, 0x12, 0x72, 0x65, 0x74, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65,
+ 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x19, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e,
+ 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x11, 0x72, 0x65, 0x74, 0x72,
+ 0x69, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x54, 0x0a,
+ 0x11, 0x63, 0x6f, 0x64, 0x65, 0x63, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79,
+ 0x70, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x63, 0x43, 0x6c,
+ 0x69, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02,
+ 0x10, 0x01, 0x52, 0x0f, 0x63, 0x6f, 0x64, 0x65, 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54,
+ 0x79, 0x70, 0x65, 0x12, 0x56, 0x0a, 0x14, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e,
+ 0x61, 0x6d, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67,
+ 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x47, 0x0a, 0x06, 0x6d,
+ 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e,
+ 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64,
+ 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x82, 0x01, 0x04, 0x10, 0x01, 0x20, 0x06, 0x52, 0x06, 0x6d, 0x65,
+ 0x74, 0x68, 0x6f, 0x64, 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48,
+ 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x48,
+ 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06,
+ 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x52, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f,
+ 0x6e, 0x61, 0x6d, 0x65, 0x52, 0x09, 0x75, 0x73, 0x65, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x32, 0x1a,
+ 0xc9, 0x01, 0x0a, 0x0e, 0x54, 0x63, 0x70, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65,
+ 0x63, 0x6b, 0x12, 0x3d, 0x0a, 0x04, 0x73, 0x65, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
+ 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68,
+ 0x65, 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x04, 0x73, 0x65, 0x6e,
+ 0x64, 0x12, 0x43, 0x0a, 0x07, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x07, 0x72,
+ 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x3a, 0x33, 0x9a, 0xc5, 0x88, 0x1e, 0x2e, 0x0a, 0x2c, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65,
+ 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x54, 0x63, 0x70,
+ 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x1a, 0x5b, 0x0a, 0x10, 0x52,
+ 0x65, 0x64, 0x69, 0x73, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12,
+ 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65,
+ 0x79, 0x3a, 0x35, 0x9a, 0xc5, 0x88, 0x1e, 0x30, 0x0a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c,
+ 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x52, 0x65, 0x64, 0x69, 0x73, 0x48, 0x65, 0x61,
+ 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x1a, 0xf4, 0x01, 0x0a, 0x0f, 0x47, 0x72, 0x70,
+ 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x21, 0x0a, 0x0c,
+ 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12,
+ 0x29, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x02, 0xc8, 0x01, 0x00, 0x52,
+ 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x5d, 0x0a, 0x10, 0x69, 0x6e,
+ 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64,
+ 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa,
+ 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x0f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61,
+ 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f,
+ 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63,
+ 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e,
+ 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x1a,
+ 0xc0, 0x01, 0x0a, 0x11, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00,
+ 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x36, 0x9a,
+ 0xc5, 0x88, 0x1e, 0x31, 0x0a, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e,
+ 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68,
+ 0x65, 0x63, 0x6b, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f,
+ 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x1a, 0x64, 0x0a, 0x0a, 0x54, 0x6c, 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f,
+ 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72,
+ 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x54, 0x6c,
+ 0x73, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a,
+ 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x42, 0x15,
+ 0x0a, 0x0e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72,
+ 0x12, 0x03, 0xf8, 0x42, 0x01, 0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x2a, 0x60, 0x0a, 0x0c, 0x48,
+ 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55,
+ 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x48, 0x45, 0x41, 0x4c,
+ 0x54, 0x48, 0x59, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x4e, 0x48, 0x45, 0x41, 0x4c, 0x54,
+ 0x48, 0x59, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x52, 0x41, 0x49, 0x4e, 0x49, 0x4e, 0x47,
+ 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x54, 0x49, 0x4d, 0x45, 0x4f, 0x55, 0x54, 0x10, 0x04, 0x12,
+ 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x47, 0x52, 0x41, 0x44, 0x45, 0x44, 0x10, 0x05, 0x42, 0x84, 0x01,
+ 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72,
+ 0x65, 0x2e, 0x76, 0x33, 0x42, 0x10, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63,
+ 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f,
+ 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65,
+ 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f,
+ 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1,
+ 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_config_core_v3_health_check_proto_rawDescOnce sync.Once
+ file_envoy_config_core_v3_health_check_proto_rawDescData = file_envoy_config_core_v3_health_check_proto_rawDesc
+)
+
+func file_envoy_config_core_v3_health_check_proto_rawDescGZIP() []byte {
+ file_envoy_config_core_v3_health_check_proto_rawDescOnce.Do(func() {
+ file_envoy_config_core_v3_health_check_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_health_check_proto_rawDescData)
+ })
+ return file_envoy_config_core_v3_health_check_proto_rawDescData
+}
+
+var file_envoy_config_core_v3_health_check_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_envoy_config_core_v3_health_check_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
+var file_envoy_config_core_v3_health_check_proto_goTypes = []interface{}{
+ (HealthStatus)(0), // 0: envoy.config.core.v3.HealthStatus
+ (*HealthStatusSet)(nil), // 1: envoy.config.core.v3.HealthStatusSet
+ (*HealthCheck)(nil), // 2: envoy.config.core.v3.HealthCheck
+ (*HealthCheck_Payload)(nil), // 3: envoy.config.core.v3.HealthCheck.Payload
+ (*HealthCheck_HttpHealthCheck)(nil), // 4: envoy.config.core.v3.HealthCheck.HttpHealthCheck
+ (*HealthCheck_TcpHealthCheck)(nil), // 5: envoy.config.core.v3.HealthCheck.TcpHealthCheck
+ (*HealthCheck_RedisHealthCheck)(nil), // 6: envoy.config.core.v3.HealthCheck.RedisHealthCheck
+ (*HealthCheck_GrpcHealthCheck)(nil), // 7: envoy.config.core.v3.HealthCheck.GrpcHealthCheck
+ (*HealthCheck_CustomHealthCheck)(nil), // 8: envoy.config.core.v3.HealthCheck.CustomHealthCheck
+ (*HealthCheck_TlsOptions)(nil), // 9: envoy.config.core.v3.HealthCheck.TlsOptions
+ (*durationpb.Duration)(nil), // 10: google.protobuf.Duration
+ (*wrapperspb.UInt32Value)(nil), // 11: google.protobuf.UInt32Value
+ (*wrapperspb.BoolValue)(nil), // 12: google.protobuf.BoolValue
+ (*TypedExtensionConfig)(nil), // 13: envoy.config.core.v3.TypedExtensionConfig
+ (*EventServiceConfig)(nil), // 14: envoy.config.core.v3.EventServiceConfig
+ (*structpb.Struct)(nil), // 15: google.protobuf.Struct
+ (*wrapperspb.UInt64Value)(nil), // 16: google.protobuf.UInt64Value
+ (*HeaderValueOption)(nil), // 17: envoy.config.core.v3.HeaderValueOption
+ (*v3.Int64Range)(nil), // 18: envoy.type.v3.Int64Range
+ (v3.CodecClientType)(0), // 19: envoy.type.v3.CodecClientType
+ (*v31.StringMatcher)(nil), // 20: envoy.type.matcher.v3.StringMatcher
+ (RequestMethod)(0), // 21: envoy.config.core.v3.RequestMethod
+ (*anypb.Any)(nil), // 22: google.protobuf.Any
+}
+var file_envoy_config_core_v3_health_check_proto_depIdxs = []int32{
+ 0, // 0: envoy.config.core.v3.HealthStatusSet.statuses:type_name -> envoy.config.core.v3.HealthStatus
+ 10, // 1: envoy.config.core.v3.HealthCheck.timeout:type_name -> google.protobuf.Duration
+ 10, // 2: envoy.config.core.v3.HealthCheck.interval:type_name -> google.protobuf.Duration
+ 10, // 3: envoy.config.core.v3.HealthCheck.initial_jitter:type_name -> google.protobuf.Duration
+ 10, // 4: envoy.config.core.v3.HealthCheck.interval_jitter:type_name -> google.protobuf.Duration
+ 11, // 5: envoy.config.core.v3.HealthCheck.unhealthy_threshold:type_name -> google.protobuf.UInt32Value
+ 11, // 6: envoy.config.core.v3.HealthCheck.healthy_threshold:type_name -> google.protobuf.UInt32Value
+ 11, // 7: envoy.config.core.v3.HealthCheck.alt_port:type_name -> google.protobuf.UInt32Value
+ 12, // 8: envoy.config.core.v3.HealthCheck.reuse_connection:type_name -> google.protobuf.BoolValue
+ 4, // 9: envoy.config.core.v3.HealthCheck.http_health_check:type_name -> envoy.config.core.v3.HealthCheck.HttpHealthCheck
+ 5, // 10: envoy.config.core.v3.HealthCheck.tcp_health_check:type_name -> envoy.config.core.v3.HealthCheck.TcpHealthCheck
+ 7, // 11: envoy.config.core.v3.HealthCheck.grpc_health_check:type_name -> envoy.config.core.v3.HealthCheck.GrpcHealthCheck
+ 8, // 12: envoy.config.core.v3.HealthCheck.custom_health_check:type_name -> envoy.config.core.v3.HealthCheck.CustomHealthCheck
+ 10, // 13: envoy.config.core.v3.HealthCheck.no_traffic_interval:type_name -> google.protobuf.Duration
+ 10, // 14: envoy.config.core.v3.HealthCheck.no_traffic_healthy_interval:type_name -> google.protobuf.Duration
+ 10, // 15: envoy.config.core.v3.HealthCheck.unhealthy_interval:type_name -> google.protobuf.Duration
+ 10, // 16: envoy.config.core.v3.HealthCheck.unhealthy_edge_interval:type_name -> google.protobuf.Duration
+ 10, // 17: envoy.config.core.v3.HealthCheck.healthy_edge_interval:type_name -> google.protobuf.Duration
+ 13, // 18: envoy.config.core.v3.HealthCheck.event_logger:type_name -> envoy.config.core.v3.TypedExtensionConfig
+ 14, // 19: envoy.config.core.v3.HealthCheck.event_service:type_name -> envoy.config.core.v3.EventServiceConfig
+ 9, // 20: envoy.config.core.v3.HealthCheck.tls_options:type_name -> envoy.config.core.v3.HealthCheck.TlsOptions
+ 15, // 21: envoy.config.core.v3.HealthCheck.transport_socket_match_criteria:type_name -> google.protobuf.Struct
+ 3, // 22: envoy.config.core.v3.HealthCheck.HttpHealthCheck.send:type_name -> envoy.config.core.v3.HealthCheck.Payload
+ 3, // 23: envoy.config.core.v3.HealthCheck.HttpHealthCheck.receive:type_name -> envoy.config.core.v3.HealthCheck.Payload
+ 16, // 24: envoy.config.core.v3.HealthCheck.HttpHealthCheck.response_buffer_size:type_name -> google.protobuf.UInt64Value
+ 17, // 25: envoy.config.core.v3.HealthCheck.HttpHealthCheck.request_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption
+ 18, // 26: envoy.config.core.v3.HealthCheck.HttpHealthCheck.expected_statuses:type_name -> envoy.type.v3.Int64Range
+ 18, // 27: envoy.config.core.v3.HealthCheck.HttpHealthCheck.retriable_statuses:type_name -> envoy.type.v3.Int64Range
+ 19, // 28: envoy.config.core.v3.HealthCheck.HttpHealthCheck.codec_client_type:type_name -> envoy.type.v3.CodecClientType
+ 20, // 29: envoy.config.core.v3.HealthCheck.HttpHealthCheck.service_name_matcher:type_name -> envoy.type.matcher.v3.StringMatcher
+ 21, // 30: envoy.config.core.v3.HealthCheck.HttpHealthCheck.method:type_name -> envoy.config.core.v3.RequestMethod
+ 3, // 31: envoy.config.core.v3.HealthCheck.TcpHealthCheck.send:type_name -> envoy.config.core.v3.HealthCheck.Payload
+ 3, // 32: envoy.config.core.v3.HealthCheck.TcpHealthCheck.receive:type_name -> envoy.config.core.v3.HealthCheck.Payload
+ 17, // 33: envoy.config.core.v3.HealthCheck.GrpcHealthCheck.initial_metadata:type_name -> envoy.config.core.v3.HeaderValueOption
+ 22, // 34: envoy.config.core.v3.HealthCheck.CustomHealthCheck.typed_config:type_name -> google.protobuf.Any
+ 35, // [35:35] is the sub-list for method output_type
+ 35, // [35:35] is the sub-list for method input_type
+ 35, // [35:35] is the sub-list for extension type_name
+ 35, // [35:35] is the sub-list for extension extendee
+ 0, // [0:35] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_core_v3_health_check_proto_init() }
+func file_envoy_config_core_v3_health_check_proto_init() {
+ if File_envoy_config_core_v3_health_check_proto != nil {
+ return
+ }
+ file_envoy_config_core_v3_base_proto_init()
+ file_envoy_config_core_v3_event_service_config_proto_init()
+ file_envoy_config_core_v3_extension_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_core_v3_health_check_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HealthStatusSet); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_health_check_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HealthCheck); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_health_check_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HealthCheck_Payload); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_health_check_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HealthCheck_HttpHealthCheck); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_health_check_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HealthCheck_TcpHealthCheck); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_health_check_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HealthCheck_RedisHealthCheck); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_health_check_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HealthCheck_GrpcHealthCheck); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_health_check_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HealthCheck_CustomHealthCheck); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_health_check_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HealthCheck_TlsOptions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_envoy_config_core_v3_health_check_proto_msgTypes[1].OneofWrappers = []interface{}{
+ (*HealthCheck_HttpHealthCheck_)(nil),
+ (*HealthCheck_TcpHealthCheck_)(nil),
+ (*HealthCheck_GrpcHealthCheck_)(nil),
+ (*HealthCheck_CustomHealthCheck_)(nil),
+ }
+ file_envoy_config_core_v3_health_check_proto_msgTypes[2].OneofWrappers = []interface{}{
+ (*HealthCheck_Payload_Text)(nil),
+ (*HealthCheck_Payload_Binary)(nil),
+ }
+ file_envoy_config_core_v3_health_check_proto_msgTypes[7].OneofWrappers = []interface{}{
+ (*HealthCheck_CustomHealthCheck_TypedConfig)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_core_v3_health_check_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 9,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_core_v3_health_check_proto_goTypes,
+ DependencyIndexes: file_envoy_config_core_v3_health_check_proto_depIdxs,
+ EnumInfos: file_envoy_config_core_v3_health_check_proto_enumTypes,
+ MessageInfos: file_envoy_config_core_v3_health_check_proto_msgTypes,
+ }.Build()
+ File_envoy_config_core_v3_health_check_proto = out.File
+ file_envoy_config_core_v3_health_check_proto_rawDesc = nil
+ file_envoy_config_core_v3_health_check_proto_goTypes = nil
+ file_envoy_config_core_v3_health_check_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/health_check.pb.validate.go b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/health_check.pb.validate.go
new file mode 100644
index 000000000..d496267b7
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/health_check.pb.validate.go
@@ -0,0 +1,2259 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/core/v3/health_check.proto
+
+package corev3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+
+ v3 "github.com/cilium/proxy/go/envoy/type/v3"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+
+ _ = v3.CodecClientType(0)
+)
+
+// Validate checks the field values on HealthStatusSet with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *HealthStatusSet) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on HealthStatusSet with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// HealthStatusSetMultiError, or nil if none found.
+func (m *HealthStatusSet) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *HealthStatusSet) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetStatuses() {
+ _, _ = idx, item
+
+ if _, ok := HealthStatus_name[int32(item)]; !ok {
+ err := HealthStatusSetValidationError{
+ field: fmt.Sprintf("Statuses[%v]", idx),
+ reason: "value must be one of the defined enum values",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return HealthStatusSetMultiError(errors)
+ }
+
+ return nil
+}
+
+// HealthStatusSetMultiError is an error wrapping multiple validation errors
+// returned by HealthStatusSet.ValidateAll() if the designated constraints
+// aren't met.
+type HealthStatusSetMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m HealthStatusSetMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m HealthStatusSetMultiError) AllErrors() []error { return m }
+
+// HealthStatusSetValidationError is the validation error returned by
+// HealthStatusSet.Validate if the designated constraints aren't met.
+type HealthStatusSetValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HealthStatusSetValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HealthStatusSetValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HealthStatusSetValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HealthStatusSetValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HealthStatusSetValidationError) ErrorName() string { return "HealthStatusSetValidationError" }
+
+// Error satisfies the builtin error interface
+func (e HealthStatusSetValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHealthStatusSet.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HealthStatusSetValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HealthStatusSetValidationError{}
+
+// Validate checks the field values on HealthCheck with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *HealthCheck) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on HealthCheck with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in HealthCheckMultiError, or
+// nil if none found.
+func (m *HealthCheck) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *HealthCheck) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if m.GetTimeout() == nil {
+ err := HealthCheckValidationError{
+ field: "Timeout",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if d := m.GetTimeout(); d != nil {
+ dur, err := d.AsDuration(), d.CheckValid()
+ if err != nil {
+ err = HealthCheckValidationError{
+ field: "Timeout",
+ reason: "value is not a valid duration",
+ cause: err,
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ } else {
+
+ gt := time.Duration(0*time.Second + 0*time.Nanosecond)
+
+ if dur <= gt {
+ err := HealthCheckValidationError{
+ field: "Timeout",
+ reason: "value must be greater than 0s",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+ }
+
+ if m.GetInterval() == nil {
+ err := HealthCheckValidationError{
+ field: "Interval",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if d := m.GetInterval(); d != nil {
+ dur, err := d.AsDuration(), d.CheckValid()
+ if err != nil {
+ err = HealthCheckValidationError{
+ field: "Interval",
+ reason: "value is not a valid duration",
+ cause: err,
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ } else {
+
+ gt := time.Duration(0*time.Second + 0*time.Nanosecond)
+
+ if dur <= gt {
+ err := HealthCheckValidationError{
+ field: "Interval",
+ reason: "value must be greater than 0s",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetInitialJitter()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HealthCheckValidationError{
+ field: "InitialJitter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HealthCheckValidationError{
+ field: "InitialJitter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetInitialJitter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheckValidationError{
+ field: "InitialJitter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetIntervalJitter()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HealthCheckValidationError{
+ field: "IntervalJitter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HealthCheckValidationError{
+ field: "IntervalJitter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetIntervalJitter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheckValidationError{
+ field: "IntervalJitter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for IntervalJitterPercent
+
+ if m.GetUnhealthyThreshold() == nil {
+ err := HealthCheckValidationError{
+ field: "UnhealthyThreshold",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetUnhealthyThreshold()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HealthCheckValidationError{
+ field: "UnhealthyThreshold",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HealthCheckValidationError{
+ field: "UnhealthyThreshold",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetUnhealthyThreshold()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheckValidationError{
+ field: "UnhealthyThreshold",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if m.GetHealthyThreshold() == nil {
+ err := HealthCheckValidationError{
+ field: "HealthyThreshold",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetHealthyThreshold()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HealthCheckValidationError{
+ field: "HealthyThreshold",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HealthCheckValidationError{
+ field: "HealthyThreshold",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetHealthyThreshold()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheckValidationError{
+ field: "HealthyThreshold",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetAltPort()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HealthCheckValidationError{
+ field: "AltPort",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HealthCheckValidationError{
+ field: "AltPort",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetAltPort()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheckValidationError{
+ field: "AltPort",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetReuseConnection()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HealthCheckValidationError{
+ field: "ReuseConnection",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HealthCheckValidationError{
+ field: "ReuseConnection",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetReuseConnection()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheckValidationError{
+ field: "ReuseConnection",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if d := m.GetNoTrafficInterval(); d != nil {
+ dur, err := d.AsDuration(), d.CheckValid()
+ if err != nil {
+ err = HealthCheckValidationError{
+ field: "NoTrafficInterval",
+ reason: "value is not a valid duration",
+ cause: err,
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ } else {
+
+ gt := time.Duration(0*time.Second + 0*time.Nanosecond)
+
+ if dur <= gt {
+ err := HealthCheckValidationError{
+ field: "NoTrafficInterval",
+ reason: "value must be greater than 0s",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+ }
+
+ if d := m.GetNoTrafficHealthyInterval(); d != nil {
+ dur, err := d.AsDuration(), d.CheckValid()
+ if err != nil {
+ err = HealthCheckValidationError{
+ field: "NoTrafficHealthyInterval",
+ reason: "value is not a valid duration",
+ cause: err,
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ } else {
+
+ gt := time.Duration(0*time.Second + 0*time.Nanosecond)
+
+ if dur <= gt {
+ err := HealthCheckValidationError{
+ field: "NoTrafficHealthyInterval",
+ reason: "value must be greater than 0s",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+ }
+
+ if d := m.GetUnhealthyInterval(); d != nil {
+ dur, err := d.AsDuration(), d.CheckValid()
+ if err != nil {
+ err = HealthCheckValidationError{
+ field: "UnhealthyInterval",
+ reason: "value is not a valid duration",
+ cause: err,
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ } else {
+
+ gt := time.Duration(0*time.Second + 0*time.Nanosecond)
+
+ if dur <= gt {
+ err := HealthCheckValidationError{
+ field: "UnhealthyInterval",
+ reason: "value must be greater than 0s",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+ }
+
+ if d := m.GetUnhealthyEdgeInterval(); d != nil {
+ dur, err := d.AsDuration(), d.CheckValid()
+ if err != nil {
+ err = HealthCheckValidationError{
+ field: "UnhealthyEdgeInterval",
+ reason: "value is not a valid duration",
+ cause: err,
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ } else {
+
+ gt := time.Duration(0*time.Second + 0*time.Nanosecond)
+
+ if dur <= gt {
+ err := HealthCheckValidationError{
+ field: "UnhealthyEdgeInterval",
+ reason: "value must be greater than 0s",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+ }
+
+ if d := m.GetHealthyEdgeInterval(); d != nil {
+ dur, err := d.AsDuration(), d.CheckValid()
+ if err != nil {
+ err = HealthCheckValidationError{
+ field: "HealthyEdgeInterval",
+ reason: "value is not a valid duration",
+ cause: err,
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ } else {
+
+ gt := time.Duration(0*time.Second + 0*time.Nanosecond)
+
+ if dur <= gt {
+ err := HealthCheckValidationError{
+ field: "HealthyEdgeInterval",
+ reason: "value must be greater than 0s",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+ }
+
+ // no validation rules for EventLogPath
+
+ for idx, item := range m.GetEventLogger() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HealthCheckValidationError{
+ field: fmt.Sprintf("EventLogger[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HealthCheckValidationError{
+ field: fmt.Sprintf("EventLogger[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheckValidationError{
+ field: fmt.Sprintf("EventLogger[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if all {
+ switch v := interface{}(m.GetEventService()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HealthCheckValidationError{
+ field: "EventService",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HealthCheckValidationError{
+ field: "EventService",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetEventService()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheckValidationError{
+ field: "EventService",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for AlwaysLogHealthCheckFailures
+
+ if all {
+ switch v := interface{}(m.GetTlsOptions()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HealthCheckValidationError{
+ field: "TlsOptions",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HealthCheckValidationError{
+ field: "TlsOptions",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetTlsOptions()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheckValidationError{
+ field: "TlsOptions",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetTransportSocketMatchCriteria()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HealthCheckValidationError{
+ field: "TransportSocketMatchCriteria",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HealthCheckValidationError{
+ field: "TransportSocketMatchCriteria",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetTransportSocketMatchCriteria()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheckValidationError{
+ field: "TransportSocketMatchCriteria",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ oneofHealthCheckerPresent := false
+ switch v := m.HealthChecker.(type) {
+ case *HealthCheck_HttpHealthCheck_:
+ if v == nil {
+ err := HealthCheckValidationError{
+ field: "HealthChecker",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofHealthCheckerPresent = true
+
+ if all {
+ switch v := interface{}(m.GetHttpHealthCheck()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HealthCheckValidationError{
+ field: "HttpHealthCheck",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HealthCheckValidationError{
+ field: "HttpHealthCheck",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetHttpHealthCheck()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheckValidationError{
+ field: "HttpHealthCheck",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *HealthCheck_TcpHealthCheck_:
+ if v == nil {
+ err := HealthCheckValidationError{
+ field: "HealthChecker",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofHealthCheckerPresent = true
+
+ if all {
+ switch v := interface{}(m.GetTcpHealthCheck()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HealthCheckValidationError{
+ field: "TcpHealthCheck",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HealthCheckValidationError{
+ field: "TcpHealthCheck",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetTcpHealthCheck()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheckValidationError{
+ field: "TcpHealthCheck",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *HealthCheck_GrpcHealthCheck_:
+ if v == nil {
+ err := HealthCheckValidationError{
+ field: "HealthChecker",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofHealthCheckerPresent = true
+
+ if all {
+ switch v := interface{}(m.GetGrpcHealthCheck()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HealthCheckValidationError{
+ field: "GrpcHealthCheck",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HealthCheckValidationError{
+ field: "GrpcHealthCheck",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetGrpcHealthCheck()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheckValidationError{
+ field: "GrpcHealthCheck",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *HealthCheck_CustomHealthCheck_:
+ if v == nil {
+ err := HealthCheckValidationError{
+ field: "HealthChecker",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofHealthCheckerPresent = true
+
+ if all {
+ switch v := interface{}(m.GetCustomHealthCheck()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HealthCheckValidationError{
+ field: "CustomHealthCheck",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HealthCheckValidationError{
+ field: "CustomHealthCheck",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetCustomHealthCheck()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheckValidationError{
+ field: "CustomHealthCheck",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofHealthCheckerPresent {
+ err := HealthCheckValidationError{
+ field: "HealthChecker",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return HealthCheckMultiError(errors)
+ }
+
+ return nil
+}
+
+// NOTE(review): this block follows the protoc-gen-validate pattern and is
+// presumably machine-generated — prefer regenerating over hand edits.
+
+// HealthCheckMultiError is an error wrapping multiple validation errors
+// returned by HealthCheck.ValidateAll() if the designated constraints aren't met.
+type HealthCheckMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m HealthCheckMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m HealthCheckMultiError) AllErrors() []error { return m }
+
+// HealthCheckValidationError is the validation error returned by
+// HealthCheck.Validate if the designated constraints aren't met.
+type HealthCheckValidationError struct {
+ // field is the name of the offending field; reason describes the violated
+ // rule; cause carries a nested error (if any); key reports whether the
+ // violation concerns a map key (see Error below, which prefixes "key for ").
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HealthCheckValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HealthCheckValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HealthCheckValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HealthCheckValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HealthCheckValidationError) ErrorName() string { return "HealthCheckValidationError" }
+
+// Error satisfies the builtin error interface
+func (e HealthCheckValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHealthCheck.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HealthCheckValidationError{}
+
+// Compile-time assertion that the type exposes the full validation-error API.
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HealthCheckValidationError{}
+
+// Validate checks the field values on HealthCheck_Payload with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *HealthCheck_Payload) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on HealthCheck_Payload with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// HealthCheck_PayloadMultiError, or nil if none found.
+func (m *HealthCheck_Payload) ValidateAll() error {
+ return m.validate(true)
+}
+
+// validate applies the constraints; when all is true every violation is
+// collected into a MultiError, otherwise the first violation is returned
+// immediately.
+func (m *HealthCheck_Payload) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // The payload oneof must be set, and a typed-nil wrapper is rejected.
+ oneofPayloadPresent := false
+ switch v := m.Payload.(type) {
+ case *HealthCheck_Payload_Text:
+ if v == nil {
+ err := HealthCheck_PayloadValidationError{
+ field: "Payload",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofPayloadPresent = true
+
+ // Length is measured in runes, not bytes.
+ if utf8.RuneCountInString(m.GetText()) < 1 {
+ err := HealthCheck_PayloadValidationError{
+ field: "Text",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ case *HealthCheck_Payload_Binary:
+ if v == nil {
+ err := HealthCheck_PayloadValidationError{
+ field: "Payload",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofPayloadPresent = true
+ // no validation rules for Binary
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofPayloadPresent {
+ err := HealthCheck_PayloadValidationError{
+ field: "Payload",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return HealthCheck_PayloadMultiError(errors)
+ }
+
+ return nil
+}
+
+// HealthCheck_PayloadMultiError is an error wrapping multiple validation
+// errors returned by HealthCheck_Payload.ValidateAll() if the designated
+// constraints aren't met.
+type HealthCheck_PayloadMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m HealthCheck_PayloadMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m HealthCheck_PayloadMultiError) AllErrors() []error { return m }
+
+// HealthCheck_PayloadValidationError is the validation error returned by
+// HealthCheck_Payload.Validate if the designated constraints aren't met.
+type HealthCheck_PayloadValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HealthCheck_PayloadValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HealthCheck_PayloadValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HealthCheck_PayloadValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HealthCheck_PayloadValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HealthCheck_PayloadValidationError) ErrorName() string {
+ return "HealthCheck_PayloadValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e HealthCheck_PayloadValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHealthCheck_Payload.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HealthCheck_PayloadValidationError{}
+
+// Compile-time assertion that the type exposes the full validation-error API.
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HealthCheck_PayloadValidationError{}
+
+// Validate checks the field values on HealthCheck_HttpHealthCheck with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *HealthCheck_HttpHealthCheck) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on HealthCheck_HttpHealthCheck with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// HealthCheck_HttpHealthCheckMultiError, or nil if none found.
+func (m *HealthCheck_HttpHealthCheck) ValidateAll() error {
+ return m.validate(true)
+}
+
+// validate applies the constraints; when all is true every violation is
+// collected into a MultiError, otherwise the first violation is returned
+// immediately. Embedded messages are validated through runtime type assertions
+// on the optional ValidateAll()/Validate() interfaces, since the concrete
+// field types may or may not implement them.
+func (m *HealthCheck_HttpHealthCheck) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // Host must not contain NUL, LF, or CR (header-injection guard).
+ if !_HealthCheck_HttpHealthCheck_Host_Pattern.MatchString(m.GetHost()) {
+ err := HealthCheck_HttpHealthCheckValidationError{
+ field: "Host",
+ reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if utf8.RuneCountInString(m.GetPath()) < 1 {
+ err := HealthCheck_HttpHealthCheckValidationError{
+ field: "Path",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if !_HealthCheck_HttpHealthCheck_Path_Pattern.MatchString(m.GetPath()) {
+ err := HealthCheck_HttpHealthCheckValidationError{
+ field: "Path",
+ reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetSend()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HealthCheck_HttpHealthCheckValidationError{
+ field: "Send",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HealthCheck_HttpHealthCheckValidationError{
+ field: "Send",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetSend()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheck_HttpHealthCheckValidationError{
+ field: "Send",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ for idx, item := range m.GetReceive() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HealthCheck_HttpHealthCheckValidationError{
+ field: fmt.Sprintf("Receive[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HealthCheck_HttpHealthCheckValidationError{
+ field: fmt.Sprintf("Receive[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheck_HttpHealthCheckValidationError{
+ field: fmt.Sprintf("Receive[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ // Only validated when the wrapper message is present (nil means unset).
+ if wrapper := m.GetResponseBufferSize(); wrapper != nil {
+
+ if wrapper.GetValue() < 0 {
+ err := HealthCheck_HttpHealthCheckValidationError{
+ field: "ResponseBufferSize",
+ reason: "value must be greater than or equal to 0",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+
+ if len(m.GetRequestHeadersToAdd()) > 1000 {
+ err := HealthCheck_HttpHealthCheckValidationError{
+ field: "RequestHeadersToAdd",
+ reason: "value must contain no more than 1000 item(s)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ for idx, item := range m.GetRequestHeadersToAdd() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HealthCheck_HttpHealthCheckValidationError{
+ field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HealthCheck_HttpHealthCheckValidationError{
+ field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheck_HttpHealthCheckValidationError{
+ field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetRequestHeadersToRemove() {
+ _, _ = idx, item
+
+ if !_HealthCheck_HttpHealthCheck_RequestHeadersToRemove_Pattern.MatchString(item) {
+ err := HealthCheck_HttpHealthCheckValidationError{
+ field: fmt.Sprintf("RequestHeadersToRemove[%v]", idx),
+ reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+
+ for idx, item := range m.GetExpectedStatuses() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HealthCheck_HttpHealthCheckValidationError{
+ field: fmt.Sprintf("ExpectedStatuses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HealthCheck_HttpHealthCheckValidationError{
+ field: fmt.Sprintf("ExpectedStatuses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheck_HttpHealthCheckValidationError{
+ field: fmt.Sprintf("ExpectedStatuses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetRetriableStatuses() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HealthCheck_HttpHealthCheckValidationError{
+ field: fmt.Sprintf("RetriableStatuses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HealthCheck_HttpHealthCheckValidationError{
+ field: fmt.Sprintf("RetriableStatuses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheck_HttpHealthCheckValidationError{
+ field: fmt.Sprintf("RetriableStatuses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ // Enum fields are checked against the generated name maps.
+ if _, ok := v3.CodecClientType_name[int32(m.GetCodecClientType())]; !ok {
+ err := HealthCheck_HttpHealthCheckValidationError{
+ field: "CodecClientType",
+ reason: "value must be one of the defined enum values",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetServiceNameMatcher()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HealthCheck_HttpHealthCheckValidationError{
+ field: "ServiceNameMatcher",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HealthCheck_HttpHealthCheckValidationError{
+ field: "ServiceNameMatcher",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetServiceNameMatcher()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheck_HttpHealthCheckValidationError{
+ field: "ServiceNameMatcher",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if _, ok := _HealthCheck_HttpHealthCheck_Method_NotInLookup[m.GetMethod()]; ok {
+ err := HealthCheck_HttpHealthCheckValidationError{
+ field: "Method",
+ reason: "value must not be in list [CONNECT]",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if _, ok := RequestMethod_name[int32(m.GetMethod())]; !ok {
+ err := HealthCheck_HttpHealthCheckValidationError{
+ field: "Method",
+ reason: "value must be one of the defined enum values",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return HealthCheck_HttpHealthCheckMultiError(errors)
+ }
+
+ return nil
+}
+
+// HealthCheck_HttpHealthCheckMultiError is an error wrapping multiple
+// validation errors returned by HealthCheck_HttpHealthCheck.ValidateAll() if
+// the designated constraints aren't met.
+type HealthCheck_HttpHealthCheckMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m HealthCheck_HttpHealthCheckMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m HealthCheck_HttpHealthCheckMultiError) AllErrors() []error { return m }
+
+// HealthCheck_HttpHealthCheckValidationError is the validation error returned
+// by HealthCheck_HttpHealthCheck.Validate if the designated constraints
+// aren't met.
+type HealthCheck_HttpHealthCheckValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HealthCheck_HttpHealthCheckValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HealthCheck_HttpHealthCheckValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HealthCheck_HttpHealthCheckValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HealthCheck_HttpHealthCheckValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HealthCheck_HttpHealthCheckValidationError) ErrorName() string {
+ return "HealthCheck_HttpHealthCheckValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e HealthCheck_HttpHealthCheckValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHealthCheck_HttpHealthCheck.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HealthCheck_HttpHealthCheckValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HealthCheck_HttpHealthCheckValidationError{}
+
+// Patterns compiled once at package init; all three reject NUL, LF, and CR.
+var _HealthCheck_HttpHealthCheck_Host_Pattern = regexp.MustCompile("^[^\x00\n\r]*$")
+
+var _HealthCheck_HttpHealthCheck_Path_Pattern = regexp.MustCompile("^[^\x00\n\r]*$")
+
+var _HealthCheck_HttpHealthCheck_RequestHeadersToRemove_Pattern = regexp.MustCompile("^[^\x00\n\r]*$")
+
+// Disallowed Method enum values; 6 corresponds to CONNECT (see the
+// "value must not be in list [CONNECT]" message emitted above).
+var _HealthCheck_HttpHealthCheck_Method_NotInLookup = map[RequestMethod]struct{}{
+ 6: {},
+}
+
+// Validate checks the field values on HealthCheck_TcpHealthCheck with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *HealthCheck_TcpHealthCheck) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on HealthCheck_TcpHealthCheck with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// HealthCheck_TcpHealthCheckMultiError, or nil if none found.
+func (m *HealthCheck_TcpHealthCheck) ValidateAll() error {
+ return m.validate(true)
+}
+
+// validate applies the constraints; when all is true every violation is
+// collected, otherwise the first violation is returned immediately. Send and
+// each Receive entry are validated via the optional Validate/ValidateAll
+// interfaces when the embedded types implement them.
+func (m *HealthCheck_TcpHealthCheck) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetSend()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HealthCheck_TcpHealthCheckValidationError{
+ field: "Send",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HealthCheck_TcpHealthCheckValidationError{
+ field: "Send",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetSend()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheck_TcpHealthCheckValidationError{
+ field: "Send",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ for idx, item := range m.GetReceive() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HealthCheck_TcpHealthCheckValidationError{
+ field: fmt.Sprintf("Receive[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HealthCheck_TcpHealthCheckValidationError{
+ field: fmt.Sprintf("Receive[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheck_TcpHealthCheckValidationError{
+ field: fmt.Sprintf("Receive[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return HealthCheck_TcpHealthCheckMultiError(errors)
+ }
+
+ return nil
+}
+
+// HealthCheck_TcpHealthCheckMultiError is an error wrapping multiple
+// validation errors returned by HealthCheck_TcpHealthCheck.ValidateAll() if
+// the designated constraints aren't met.
+type HealthCheck_TcpHealthCheckMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m HealthCheck_TcpHealthCheckMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m HealthCheck_TcpHealthCheckMultiError) AllErrors() []error { return m }
+
+// HealthCheck_TcpHealthCheckValidationError is the validation error returned
+// by HealthCheck_TcpHealthCheck.Validate if the designated constraints aren't met.
+type HealthCheck_TcpHealthCheckValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HealthCheck_TcpHealthCheckValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HealthCheck_TcpHealthCheckValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HealthCheck_TcpHealthCheckValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HealthCheck_TcpHealthCheckValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HealthCheck_TcpHealthCheckValidationError) ErrorName() string {
+ return "HealthCheck_TcpHealthCheckValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e HealthCheck_TcpHealthCheckValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHealthCheck_TcpHealthCheck.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HealthCheck_TcpHealthCheckValidationError{}
+
+// Compile-time assertion that the type exposes the full validation-error API.
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HealthCheck_TcpHealthCheckValidationError{}
+
+// Validate checks the field values on HealthCheck_RedisHealthCheck with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *HealthCheck_RedisHealthCheck) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on HealthCheck_RedisHealthCheck with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// HealthCheck_RedisHealthCheckMultiError, or nil if none found.
+func (m *HealthCheck_RedisHealthCheck) ValidateAll() error {
+ return m.validate(true)
+}
+
+// validate is effectively a no-op today: the message declares no constraints,
+// so it only guards against a nil receiver. The errors slice is kept to match
+// the shape of the other generated validators.
+func (m *HealthCheck_RedisHealthCheck) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Key
+
+ if len(errors) > 0 {
+ return HealthCheck_RedisHealthCheckMultiError(errors)
+ }
+
+ return nil
+}
+
+// HealthCheck_RedisHealthCheckMultiError is an error wrapping multiple
+// validation errors returned by HealthCheck_RedisHealthCheck.ValidateAll() if
+// the designated constraints aren't met.
+type HealthCheck_RedisHealthCheckMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m HealthCheck_RedisHealthCheckMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m HealthCheck_RedisHealthCheckMultiError) AllErrors() []error { return m }
+
+// HealthCheck_RedisHealthCheckValidationError is the validation error returned
+// by HealthCheck_RedisHealthCheck.Validate if the designated constraints
+// aren't met.
+type HealthCheck_RedisHealthCheckValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HealthCheck_RedisHealthCheckValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HealthCheck_RedisHealthCheckValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HealthCheck_RedisHealthCheckValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HealthCheck_RedisHealthCheckValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HealthCheck_RedisHealthCheckValidationError) ErrorName() string {
+ return "HealthCheck_RedisHealthCheckValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e HealthCheck_RedisHealthCheckValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHealthCheck_RedisHealthCheck.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HealthCheck_RedisHealthCheckValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HealthCheck_RedisHealthCheckValidationError{}
+
+// Validate checks the field values on HealthCheck_GrpcHealthCheck with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *HealthCheck_GrpcHealthCheck) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on HealthCheck_GrpcHealthCheck with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// HealthCheck_GrpcHealthCheckMultiError, or nil if none found.
+func (m *HealthCheck_GrpcHealthCheck) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *HealthCheck_GrpcHealthCheck) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for ServiceName
+
+ if !_HealthCheck_GrpcHealthCheck_Authority_Pattern.MatchString(m.GetAuthority()) {
+ err := HealthCheck_GrpcHealthCheckValidationError{
+ field: "Authority",
+ reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(m.GetInitialMetadata()) > 1000 {
+ err := HealthCheck_GrpcHealthCheckValidationError{
+ field: "InitialMetadata",
+ reason: "value must contain no more than 1000 item(s)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ for idx, item := range m.GetInitialMetadata() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HealthCheck_GrpcHealthCheckValidationError{
+ field: fmt.Sprintf("InitialMetadata[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HealthCheck_GrpcHealthCheckValidationError{
+ field: fmt.Sprintf("InitialMetadata[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheck_GrpcHealthCheckValidationError{
+ field: fmt.Sprintf("InitialMetadata[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return HealthCheck_GrpcHealthCheckMultiError(errors)
+ }
+
+ return nil
+}
+
+// HealthCheck_GrpcHealthCheckMultiError is an error wrapping multiple
+// validation errors returned by HealthCheck_GrpcHealthCheck.ValidateAll() if
+// the designated constraints aren't met.
+type HealthCheck_GrpcHealthCheckMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m HealthCheck_GrpcHealthCheckMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m HealthCheck_GrpcHealthCheckMultiError) AllErrors() []error { return m }
+
+// HealthCheck_GrpcHealthCheckValidationError is the validation error returned
+// by HealthCheck_GrpcHealthCheck.Validate if the designated constraints
+// aren't met.
+type HealthCheck_GrpcHealthCheckValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HealthCheck_GrpcHealthCheckValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HealthCheck_GrpcHealthCheckValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HealthCheck_GrpcHealthCheckValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HealthCheck_GrpcHealthCheckValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HealthCheck_GrpcHealthCheckValidationError) ErrorName() string {
+ return "HealthCheck_GrpcHealthCheckValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e HealthCheck_GrpcHealthCheckValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHealthCheck_GrpcHealthCheck.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HealthCheck_GrpcHealthCheckValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HealthCheck_GrpcHealthCheckValidationError{}
+
+var _HealthCheck_GrpcHealthCheck_Authority_Pattern = regexp.MustCompile("^[^\x00\n\r]*$")
+
+// Validate checks the field values on HealthCheck_CustomHealthCheck with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *HealthCheck_CustomHealthCheck) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on HealthCheck_CustomHealthCheck with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the result is a list of violation errors wrapped in
+// HealthCheck_CustomHealthCheckMultiError, or nil if none found.
+func (m *HealthCheck_CustomHealthCheck) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *HealthCheck_CustomHealthCheck) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if utf8.RuneCountInString(m.GetName()) < 1 {
+ err := HealthCheck_CustomHealthCheckValidationError{
+ field: "Name",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ switch v := m.ConfigType.(type) {
+ case *HealthCheck_CustomHealthCheck_TypedConfig:
+ if v == nil {
+ err := HealthCheck_CustomHealthCheckValidationError{
+ field: "ConfigType",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetTypedConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HealthCheck_CustomHealthCheckValidationError{
+ field: "TypedConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HealthCheck_CustomHealthCheckValidationError{
+ field: "TypedConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HealthCheck_CustomHealthCheckValidationError{
+ field: "TypedConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+
+ if len(errors) > 0 {
+ return HealthCheck_CustomHealthCheckMultiError(errors)
+ }
+
+ return nil
+}
+
+// HealthCheck_CustomHealthCheckMultiError is an error wrapping multiple
+// validation errors returned by HealthCheck_CustomHealthCheck.ValidateAll()
+// if the designated constraints aren't met.
+type HealthCheck_CustomHealthCheckMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m HealthCheck_CustomHealthCheckMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m HealthCheck_CustomHealthCheckMultiError) AllErrors() []error { return m }
+
+// HealthCheck_CustomHealthCheckValidationError is the validation error
+// returned by HealthCheck_CustomHealthCheck.Validate if the designated
+// constraints aren't met.
+type HealthCheck_CustomHealthCheckValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HealthCheck_CustomHealthCheckValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HealthCheck_CustomHealthCheckValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HealthCheck_CustomHealthCheckValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HealthCheck_CustomHealthCheckValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HealthCheck_CustomHealthCheckValidationError) ErrorName() string {
+ return "HealthCheck_CustomHealthCheckValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e HealthCheck_CustomHealthCheckValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHealthCheck_CustomHealthCheck.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HealthCheck_CustomHealthCheckValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HealthCheck_CustomHealthCheckValidationError{}
+
+// Validate checks the field values on HealthCheck_TlsOptions with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *HealthCheck_TlsOptions) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on HealthCheck_TlsOptions with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// HealthCheck_TlsOptionsMultiError, or nil if none found.
+func (m *HealthCheck_TlsOptions) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *HealthCheck_TlsOptions) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(errors) > 0 {
+ return HealthCheck_TlsOptionsMultiError(errors)
+ }
+
+ return nil
+}
+
+// HealthCheck_TlsOptionsMultiError is an error wrapping multiple validation
+// errors returned by HealthCheck_TlsOptions.ValidateAll() if the designated
+// constraints aren't met.
+type HealthCheck_TlsOptionsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m HealthCheck_TlsOptionsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m HealthCheck_TlsOptionsMultiError) AllErrors() []error { return m }
+
+// HealthCheck_TlsOptionsValidationError is the validation error returned by
+// HealthCheck_TlsOptions.Validate if the designated constraints aren't met.
+type HealthCheck_TlsOptionsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HealthCheck_TlsOptionsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HealthCheck_TlsOptionsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HealthCheck_TlsOptionsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HealthCheck_TlsOptionsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HealthCheck_TlsOptionsValidationError) ErrorName() string {
+ return "HealthCheck_TlsOptionsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e HealthCheck_TlsOptionsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHealthCheck_TlsOptions.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HealthCheck_TlsOptionsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HealthCheck_TlsOptionsValidationError{}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/http_uri.pb.go b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/http_uri.pb.go
new file mode 100644
index 000000000..76b21f492
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/http_uri.pb.go
@@ -0,0 +1,240 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.23.0
+// protoc v4.23.1
+// source: envoy/config/core/v3/http_uri.proto
+
+package corev3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// Envoy external URI descriptor
+type HttpUri struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The HTTP server URI. It should be a full FQDN with protocol, host and path.
+ //
+ // Example:
+ //
+ // .. code-block:: yaml
+ //
+ // uri: https://www.googleapis.com/oauth2/v1/certs
+ Uri string `protobuf:"bytes,1,opt,name=uri,proto3" json:"uri,omitempty"`
+ // Specify how “uri“ is to be fetched. Today, this requires an explicit
+ // cluster, but in the future we may support dynamic cluster creation or
+ // inline DNS resolution. See `issue
+ // `_.
+ //
+ // Types that are assignable to HttpUpstreamType:
+ //
+ // *HttpUri_Cluster
+ HttpUpstreamType isHttpUri_HttpUpstreamType `protobuf_oneof:"http_upstream_type"`
+ // Sets the maximum duration in milliseconds that a response can take to arrive upon request.
+ Timeout *durationpb.Duration `protobuf:"bytes,3,opt,name=timeout,proto3" json:"timeout,omitempty"`
+}
+
+func (x *HttpUri) Reset() {
+ *x = HttpUri{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_http_uri_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HttpUri) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HttpUri) ProtoMessage() {}
+
+func (x *HttpUri) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_http_uri_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HttpUri.ProtoReflect.Descriptor instead.
+func (*HttpUri) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_http_uri_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *HttpUri) GetUri() string {
+ if x != nil {
+ return x.Uri
+ }
+ return ""
+}
+
+func (m *HttpUri) GetHttpUpstreamType() isHttpUri_HttpUpstreamType {
+ if m != nil {
+ return m.HttpUpstreamType
+ }
+ return nil
+}
+
+func (x *HttpUri) GetCluster() string {
+ if x, ok := x.GetHttpUpstreamType().(*HttpUri_Cluster); ok {
+ return x.Cluster
+ }
+ return ""
+}
+
+func (x *HttpUri) GetTimeout() *durationpb.Duration {
+ if x != nil {
+ return x.Timeout
+ }
+ return nil
+}
+
+type isHttpUri_HttpUpstreamType interface {
+ isHttpUri_HttpUpstreamType()
+}
+
+type HttpUri_Cluster struct {
+ // A cluster is created in the Envoy "cluster_manager" config
+ // section. This field specifies the cluster name.
+ //
+ // Example:
+ //
+ // .. code-block:: yaml
+ //
+ // cluster: jwks_cluster
+ Cluster string `protobuf:"bytes,2,opt,name=cluster,proto3,oneof"`
+}
+
+func (*HttpUri_Cluster) isHttpUri_HttpUpstreamType() {}
+
+var File_envoy_config_core_v3_http_uri_proto protoreflect.FileDescriptor
+
+var file_envoy_config_core_v3_http_uri_proto_rawDesc = []byte{
+ 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x75, 0x72, 0x69, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70,
+ 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61,
+ 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76,
+ 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc7, 0x01, 0x0a, 0x07, 0x48, 0x74, 0x74, 0x70, 0x55,
+ 0x72, 0x69, 0x12, 0x19, 0x0a, 0x03, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x03, 0x75, 0x72, 0x69, 0x12, 0x23, 0x0a,
+ 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07,
+ 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74,
+ 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0a,
+ 0xfa, 0x42, 0x07, 0xaa, 0x01, 0x04, 0x08, 0x01, 0x32, 0x00, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65,
+ 0x6f, 0x75, 0x74, 0x3a, 0x20, 0x9a, 0xc5, 0x88, 0x1e, 0x1b, 0x0a, 0x19, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74,
+ 0x74, 0x70, 0x55, 0x72, 0x69, 0x42, 0x19, 0x0a, 0x12, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x75, 0x70,
+ 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01,
+ 0x42, 0x80, 0x01, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f,
+ 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
+ 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x48, 0x74, 0x74, 0x70, 0x55, 0x72, 0x69,
+ 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67,
+ 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72,
+ 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06,
+ 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_config_core_v3_http_uri_proto_rawDescOnce sync.Once
+ file_envoy_config_core_v3_http_uri_proto_rawDescData = file_envoy_config_core_v3_http_uri_proto_rawDesc
+)
+
+func file_envoy_config_core_v3_http_uri_proto_rawDescGZIP() []byte {
+ file_envoy_config_core_v3_http_uri_proto_rawDescOnce.Do(func() {
+ file_envoy_config_core_v3_http_uri_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_http_uri_proto_rawDescData)
+ })
+ return file_envoy_config_core_v3_http_uri_proto_rawDescData
+}
+
+var file_envoy_config_core_v3_http_uri_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_envoy_config_core_v3_http_uri_proto_goTypes = []interface{}{
+ (*HttpUri)(nil), // 0: envoy.config.core.v3.HttpUri
+ (*durationpb.Duration)(nil), // 1: google.protobuf.Duration
+}
+var file_envoy_config_core_v3_http_uri_proto_depIdxs = []int32{
+ 1, // 0: envoy.config.core.v3.HttpUri.timeout:type_name -> google.protobuf.Duration
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_core_v3_http_uri_proto_init() }
+func file_envoy_config_core_v3_http_uri_proto_init() {
+ if File_envoy_config_core_v3_http_uri_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_core_v3_http_uri_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HttpUri); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_envoy_config_core_v3_http_uri_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*HttpUri_Cluster)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_core_v3_http_uri_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_core_v3_http_uri_proto_goTypes,
+ DependencyIndexes: file_envoy_config_core_v3_http_uri_proto_depIdxs,
+ MessageInfos: file_envoy_config_core_v3_http_uri_proto_msgTypes,
+ }.Build()
+ File_envoy_config_core_v3_http_uri_proto = out.File
+ file_envoy_config_core_v3_http_uri_proto_rawDesc = nil
+ file_envoy_config_core_v3_http_uri_proto_goTypes = nil
+ file_envoy_config_core_v3_http_uri_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/http_uri.pb.validate.go b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/http_uri.pb.validate.go
new file mode 100644
index 000000000..0e3ab815e
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/http_uri.pb.validate.go
@@ -0,0 +1,226 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/core/v3/http_uri.proto
+
+package corev3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on HttpUri with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *HttpUri) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on HttpUri with the rules defined in the
+// proto definition for this message. If any rules are violated, the result is
+// a list of violation errors wrapped in HttpUriMultiError, or nil if none found.
+func (m *HttpUri) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *HttpUri) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if utf8.RuneCountInString(m.GetUri()) < 1 {
+ err := HttpUriValidationError{
+ field: "Uri",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if m.GetTimeout() == nil {
+ err := HttpUriValidationError{
+ field: "Timeout",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if d := m.GetTimeout(); d != nil {
+ dur, err := d.AsDuration(), d.CheckValid()
+ if err != nil {
+ err = HttpUriValidationError{
+ field: "Timeout",
+ reason: "value is not a valid duration",
+ cause: err,
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ } else {
+
+ gte := time.Duration(0*time.Second + 0*time.Nanosecond)
+
+ if dur < gte {
+ err := HttpUriValidationError{
+ field: "Timeout",
+ reason: "value must be greater than or equal to 0s",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+ }
+
+ oneofHttpUpstreamTypePresent := false
+ switch v := m.HttpUpstreamType.(type) {
+ case *HttpUri_Cluster:
+ if v == nil {
+ err := HttpUriValidationError{
+ field: "HttpUpstreamType",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofHttpUpstreamTypePresent = true
+
+ if utf8.RuneCountInString(m.GetCluster()) < 1 {
+ err := HttpUriValidationError{
+ field: "Cluster",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofHttpUpstreamTypePresent {
+ err := HttpUriValidationError{
+ field: "HttpUpstreamType",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return HttpUriMultiError(errors)
+ }
+
+ return nil
+}
+
+// HttpUriMultiError is an error wrapping multiple validation errors returned
+// by HttpUri.ValidateAll() if the designated constraints aren't met.
+type HttpUriMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m HttpUriMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m HttpUriMultiError) AllErrors() []error { return m }
+
+// HttpUriValidationError is the validation error returned by HttpUri.Validate
+// if the designated constraints aren't met.
+type HttpUriValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HttpUriValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HttpUriValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HttpUriValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HttpUriValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HttpUriValidationError) ErrorName() string { return "HttpUriValidationError" }
+
+// Error satisfies the builtin error interface
+func (e HttpUriValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHttpUri.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HttpUriValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HttpUriValidationError{}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/protocol.pb.go b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/protocol.pb.go
new file mode 100644
index 000000000..1d67dbd8d
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/protocol.pb.go
@@ -0,0 +1,2382 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.23.0
+// protoc v4.23.1
+// source: envoy/config/core/v3/protocol.proto
+
+package corev3
+
+import (
+ _ "github.com/cilium/proxy/go/envoy/annotations"
+ v3 "github.com/cilium/proxy/go/envoy/type/v3"
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/cncf/xds/go/xds/annotations/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// Action to take when Envoy receives client request with header names containing underscore
+// characters.
+// Underscore character is allowed in header names by the RFC-7230 and this behavior is implemented
+// as a security measure due to systems that treat '_' and '-' as interchangeable. Envoy by default allows client request headers with underscore
+// characters.
+type HttpProtocolOptions_HeadersWithUnderscoresAction int32
+
+const (
+ // Allow headers with underscores. This is the default behavior.
+ HttpProtocolOptions_ALLOW HttpProtocolOptions_HeadersWithUnderscoresAction = 0
+ // Reject client request. HTTP/1 requests are rejected with the 400 status. HTTP/2 requests
+ // end with the stream reset. The "httpN.requests_rejected_with_underscores_in_headers" counter
+ // is incremented for each rejected request.
+ HttpProtocolOptions_REJECT_REQUEST HttpProtocolOptions_HeadersWithUnderscoresAction = 1
+ // Drop the client header with name containing underscores. The header is dropped before the filter chain is
+ // invoked and as such filters will not see dropped headers. The
+ // "httpN.dropped_headers_with_underscores" is incremented for each dropped header.
+ HttpProtocolOptions_DROP_HEADER HttpProtocolOptions_HeadersWithUnderscoresAction = 2
+)
+
+// Enum value maps for HttpProtocolOptions_HeadersWithUnderscoresAction.
+var (
+ HttpProtocolOptions_HeadersWithUnderscoresAction_name = map[int32]string{
+ 0: "ALLOW",
+ 1: "REJECT_REQUEST",
+ 2: "DROP_HEADER",
+ }
+ HttpProtocolOptions_HeadersWithUnderscoresAction_value = map[string]int32{
+ "ALLOW": 0,
+ "REJECT_REQUEST": 1,
+ "DROP_HEADER": 2,
+ }
+)
+
+func (x HttpProtocolOptions_HeadersWithUnderscoresAction) Enum() *HttpProtocolOptions_HeadersWithUnderscoresAction {
+ p := new(HttpProtocolOptions_HeadersWithUnderscoresAction)
+ *p = x
+ return p
+}
+
+func (x HttpProtocolOptions_HeadersWithUnderscoresAction) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (HttpProtocolOptions_HeadersWithUnderscoresAction) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_core_v3_protocol_proto_enumTypes[0].Descriptor()
+}
+
+func (HttpProtocolOptions_HeadersWithUnderscoresAction) Type() protoreflect.EnumType {
+ return &file_envoy_config_core_v3_protocol_proto_enumTypes[0]
+}
+
+func (x HttpProtocolOptions_HeadersWithUnderscoresAction) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use HttpProtocolOptions_HeadersWithUnderscoresAction.Descriptor instead.
+func (HttpProtocolOptions_HeadersWithUnderscoresAction) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{5, 0}
+}
+
+// [#not-implemented-hide:]
+type TcpProtocolOptions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *TcpProtocolOptions) Reset() {
+ *x = TcpProtocolOptions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TcpProtocolOptions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TcpProtocolOptions) ProtoMessage() {}
+
+func (x *TcpProtocolOptions) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TcpProtocolOptions.ProtoReflect.Descriptor instead.
+func (*TcpProtocolOptions) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{0}
+}
+
+// Config for keepalive probes in a QUIC connection.
+// Note that QUIC keep-alive probing packets work differently from HTTP/2 keep-alive PINGs in a sense that the probing packet
+// itself doesn't timeout waiting for a probing response. Quic has a shorter idle timeout than TCP, so it doesn't rely on such probing to discover dead connections. If the peer fails to respond, the connection will idle timeout eventually. Thus, they are configured differently from :ref:`connection_keepalive `.
+type QuicKeepAliveSettings struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The max interval for a connection to send keep-alive probing packets (with PING or PATH_RESPONSE). The value should be smaller than :ref:`connection idle_timeout ` to prevent idle timeout while not less than 1s to avoid throttling the connection or flooding the peer with probes.
+ //
+ // If :ref:`initial_interval ` is absent or zero, a client connection will use this value to start probing.
+ //
+ // If zero, disable keepalive probing.
+ // If absent, use the QUICHE default interval to probe.
+ MaxInterval *durationpb.Duration `protobuf:"bytes,1,opt,name=max_interval,json=maxInterval,proto3" json:"max_interval,omitempty"`
+ // The interval to send the first few keep-alive probing packets to prevent connection from hitting the idle timeout. Subsequent probes will be sent, each one with an interval exponentially longer than previous one, till it reaches :ref:`max_interval `. And the probes afterwards will always use :ref:`max_interval `.
+ //
+ // The value should be smaller than :ref:`connection idle_timeout ` to prevent idle timeout and smaller than max_interval to take effect.
+ //
+ // If absent or zero, disable keepalive probing for a server connection. For a client connection, if :ref:`max_interval ` is also zero, do not keepalive, otherwise use max_interval or QUICHE default to probe all the time.
+ InitialInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=initial_interval,json=initialInterval,proto3" json:"initial_interval,omitempty"`
+}
+
+func (x *QuicKeepAliveSettings) Reset() {
+ *x = QuicKeepAliveSettings{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *QuicKeepAliveSettings) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QuicKeepAliveSettings) ProtoMessage() {}
+
+func (x *QuicKeepAliveSettings) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use QuicKeepAliveSettings.ProtoReflect.Descriptor instead.
+func (*QuicKeepAliveSettings) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *QuicKeepAliveSettings) GetMaxInterval() *durationpb.Duration {
+ if x != nil {
+ return x.MaxInterval
+ }
+ return nil
+}
+
+func (x *QuicKeepAliveSettings) GetInitialInterval() *durationpb.Duration {
+ if x != nil {
+ return x.InitialInterval
+ }
+ return nil
+}
+
+// QUIC protocol options which apply to both downstream and upstream connections.
+// [#next-free-field: 6]
+type QuicProtocolOptions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Maximum number of streams that the client can negotiate per connection. 100
+ // if not specified.
+ MaxConcurrentStreams *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=max_concurrent_streams,json=maxConcurrentStreams,proto3" json:"max_concurrent_streams,omitempty"`
+ // `Initial stream-level flow-control receive window
+ // `_ size. Valid values range from
+ // 1 to 16777216 (2^24, maximum supported by QUICHE) and defaults to 65536 (2^16).
+ //
+ // NOTE: 16384 (2^14) is the minimum window size supported in Google QUIC. If configured smaller than it, we will use 16384 instead.
+ // QUICHE IETF Quic implementation supports 1 bytes window. We only support increasing the default window size now, so it's also the minimum.
+ //
+ // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the
+ // QUIC stream send and receive buffers. Once the buffer reaches this pointer, watermark callbacks will fire to
+ // stop the flow of data to the stream buffers.
+ InitialStreamWindowSize *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=initial_stream_window_size,json=initialStreamWindowSize,proto3" json:"initial_stream_window_size,omitempty"`
+ // Similar to “initial_stream_window_size“, but for connection-level
+ // flow-control. Valid values rage from 1 to 25165824 (24MB, maximum supported by QUICHE) and defaults to 65536 (2^16).
+ // window. Currently, this has the same minimum/default as “initial_stream_window_size“.
+ //
+ // NOTE: 16384 (2^14) is the minimum window size supported in Google QUIC. We only support increasing the default
+ // window size now, so it's also the minimum.
+ InitialConnectionWindowSize *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=initial_connection_window_size,json=initialConnectionWindowSize,proto3" json:"initial_connection_window_size,omitempty"`
+ // The number of timeouts that can occur before port migration is triggered for QUIC clients.
+ // This defaults to 1. If set to 0, port migration will not occur on path degrading.
+ // Timeout here refers to QUIC internal path degrading timeout mechanism, such as PTO.
+ // This has no effect on server sessions.
+ NumTimeoutsToTriggerPortMigration *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=num_timeouts_to_trigger_port_migration,json=numTimeoutsToTriggerPortMigration,proto3" json:"num_timeouts_to_trigger_port_migration,omitempty"`
+ // Probes the peer at the configured interval to solicit traffic, i.e. ACK or PATH_RESPONSE, from the peer to push back connection idle timeout.
+ // If absent, use the default keepalive behavior of which a client connection sends PINGs every 15s, and a server connection doesn't do anything.
+ ConnectionKeepalive *QuicKeepAliveSettings `protobuf:"bytes,5,opt,name=connection_keepalive,json=connectionKeepalive,proto3" json:"connection_keepalive,omitempty"`
+}
+
+func (x *QuicProtocolOptions) Reset() {
+ *x = QuicProtocolOptions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *QuicProtocolOptions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QuicProtocolOptions) ProtoMessage() {}
+
+func (x *QuicProtocolOptions) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use QuicProtocolOptions.ProtoReflect.Descriptor instead.
+func (*QuicProtocolOptions) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *QuicProtocolOptions) GetMaxConcurrentStreams() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.MaxConcurrentStreams
+ }
+ return nil
+}
+
+func (x *QuicProtocolOptions) GetInitialStreamWindowSize() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.InitialStreamWindowSize
+ }
+ return nil
+}
+
+func (x *QuicProtocolOptions) GetInitialConnectionWindowSize() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.InitialConnectionWindowSize
+ }
+ return nil
+}
+
+func (x *QuicProtocolOptions) GetNumTimeoutsToTriggerPortMigration() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.NumTimeoutsToTriggerPortMigration
+ }
+ return nil
+}
+
+func (x *QuicProtocolOptions) GetConnectionKeepalive() *QuicKeepAliveSettings {
+ if x != nil {
+ return x.ConnectionKeepalive
+ }
+ return nil
+}
+
+type UpstreamHttpProtocolOptions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Set transport socket `SNI `_ for new
+ // upstream connections based on the downstream HTTP host/authority header or any other arbitrary
+ // header when :ref:`override_auto_sni_header `
+ // is set, as seen by the :ref:`router filter `.
+ // Does nothing if a filter before the http router filter sets the corresponding metadata.
+ AutoSni bool `protobuf:"varint,1,opt,name=auto_sni,json=autoSni,proto3" json:"auto_sni,omitempty"`
+ // Automatic validate upstream presented certificate for new upstream connections based on the
+ // downstream HTTP host/authority header or any other arbitrary header when :ref:`override_auto_sni_header `
+ // is set, as seen by the :ref:`router filter `.
+ // This field is intended to be set with “auto_sni“ field.
+ // Does nothing if a filter before the http router filter sets the corresponding metadata.
+ AutoSanValidation bool `protobuf:"varint,2,opt,name=auto_san_validation,json=autoSanValidation,proto3" json:"auto_san_validation,omitempty"`
+ // An optional alternative to the host/authority header to be used for setting the SNI value.
+ // It should be a valid downstream HTTP header, as seen by the
+ // :ref:`router filter `.
+ // If unset, host/authority header will be used for populating the SNI. If the specified header
+ // is not found or the value is empty, host/authority header will be used instead.
+ // This field is intended to be set with “auto_sni“ and/or “auto_san_validation“ fields.
+ // If none of these fields are set then setting this would be a no-op.
+ // Does nothing if a filter before the http router filter sets the corresponding metadata.
+ OverrideAutoSniHeader string `protobuf:"bytes,3,opt,name=override_auto_sni_header,json=overrideAutoSniHeader,proto3" json:"override_auto_sni_header,omitempty"`
+}
+
+func (x *UpstreamHttpProtocolOptions) Reset() {
+ *x = UpstreamHttpProtocolOptions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UpstreamHttpProtocolOptions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpstreamHttpProtocolOptions) ProtoMessage() {}
+
+func (x *UpstreamHttpProtocolOptions) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpstreamHttpProtocolOptions.ProtoReflect.Descriptor instead.
+func (*UpstreamHttpProtocolOptions) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *UpstreamHttpProtocolOptions) GetAutoSni() bool {
+ if x != nil {
+ return x.AutoSni
+ }
+ return false
+}
+
+func (x *UpstreamHttpProtocolOptions) GetAutoSanValidation() bool {
+ if x != nil {
+ return x.AutoSanValidation
+ }
+ return false
+}
+
+func (x *UpstreamHttpProtocolOptions) GetOverrideAutoSniHeader() string {
+ if x != nil {
+ return x.OverrideAutoSniHeader
+ }
+ return ""
+}
+
+// Configures the alternate protocols cache which tracks alternate protocols that can be used to
+// make an HTTP connection to an origin server. See https://tools.ietf.org/html/rfc7838 for
+// HTTP Alternative Services and https://datatracker.ietf.org/doc/html/draft-ietf-dnsop-svcb-https-04
+// for the "HTTPS" DNS resource record.
+// [#next-free-field: 6]
+type AlternateProtocolsCacheOptions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name of the cache. Multiple named caches allow independent alternate protocols cache
+ // configurations to operate within a single Envoy process using different configurations. All
+ // alternate protocols cache options with the same name *must* be equal in all fields when
+ // referenced from different configuration components. Configuration will fail to load if this is
+ // not the case.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // The maximum number of entries that the cache will hold. If not specified defaults to 1024.
+ //
+ // .. note:
+ //
+ // The implementation is approximate and enforced independently on each worker thread, thus
+ // it is possible for the maximum entries in the cache to go slightly above the configured
+ // value depending on timing. This is similar to how other circuit breakers work.
+ MaxEntries *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=max_entries,json=maxEntries,proto3" json:"max_entries,omitempty"`
+ // Allows configuring a persistent
+ // :ref:`key value store ` to flush
+ // alternate protocols entries to disk.
+ // This function is currently only supported if concurrency is 1
+ // Cached entries will take precedence over pre-populated entries below.
+ KeyValueStoreConfig *TypedExtensionConfig `protobuf:"bytes,3,opt,name=key_value_store_config,json=keyValueStoreConfig,proto3" json:"key_value_store_config,omitempty"`
+ // Allows pre-populating the cache with entries, as described above.
+ PrepopulatedEntries []*AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry `protobuf:"bytes,4,rep,name=prepopulated_entries,json=prepopulatedEntries,proto3" json:"prepopulated_entries,omitempty"`
+ // Optional list of hostnames suffixes for which Alt-Svc entries can be shared. For example, if
+ // this list contained the value “.c.example.com“, then an Alt-Svc entry for “foo.c.example.com“
+ // could be shared with “bar.c.example.com“ but would not be shared with “baz.example.com“. On
+ // the other hand, if the list contained the value “.example.com“ then all three hosts could share
+ // Alt-Svc entries. Each entry must start with “.“. If a hostname matches multiple suffixes, the
+ // first listed suffix will be used.
+ //
+ // Since lookup in this list is O(n), it is recommended that the number of suffixes be limited.
+ // [#not-implemented-hide:]
+ CanonicalSuffixes []string `protobuf:"bytes,5,rep,name=canonical_suffixes,json=canonicalSuffixes,proto3" json:"canonical_suffixes,omitempty"`
+}
+
+func (x *AlternateProtocolsCacheOptions) Reset() {
+ *x = AlternateProtocolsCacheOptions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AlternateProtocolsCacheOptions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlternateProtocolsCacheOptions) ProtoMessage() {}
+
+func (x *AlternateProtocolsCacheOptions) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlternateProtocolsCacheOptions.ProtoReflect.Descriptor instead.
+func (*AlternateProtocolsCacheOptions) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *AlternateProtocolsCacheOptions) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *AlternateProtocolsCacheOptions) GetMaxEntries() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.MaxEntries
+ }
+ return nil
+}
+
+func (x *AlternateProtocolsCacheOptions) GetKeyValueStoreConfig() *TypedExtensionConfig {
+ if x != nil {
+ return x.KeyValueStoreConfig
+ }
+ return nil
+}
+
+func (x *AlternateProtocolsCacheOptions) GetPrepopulatedEntries() []*AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry {
+ if x != nil {
+ return x.PrepopulatedEntries
+ }
+ return nil
+}
+
+func (x *AlternateProtocolsCacheOptions) GetCanonicalSuffixes() []string {
+ if x != nil {
+ return x.CanonicalSuffixes
+ }
+ return nil
+}
+
+// [#next-free-field: 7]
+type HttpProtocolOptions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The idle timeout for connections. The idle timeout is defined as the
+ // period in which there are no active requests. When the
+ // idle timeout is reached the connection will be closed. If the connection is an HTTP/2
+ // downstream connection a drain sequence will occur prior to closing the connection, see
+ // :ref:`drain_timeout
+ // `.
+ // Note that request based timeouts mean that HTTP/2 PINGs will not keep the connection alive.
+ // If not specified, this defaults to 1 hour. To disable idle timeouts explicitly set this to 0.
+ //
+ // .. warning::
+ //
+ // Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP
+ // FIN packets, etc.
+ //
+ // If the :ref:`overload action ` "envoy.overload_actions.reduce_timeouts"
+ // is configured, this timeout is scaled for downstream connections according to the value for
+ // :ref:`HTTP_DOWNSTREAM_CONNECTION_IDLE `.
+ IdleTimeout *durationpb.Duration `protobuf:"bytes,1,opt,name=idle_timeout,json=idleTimeout,proto3" json:"idle_timeout,omitempty"`
+ // The maximum duration of a connection. The duration is defined as a period since a connection
+ // was established. If not set, there is no max duration. When max_connection_duration is reached
+ // and if there are no active streams, the connection will be closed. If the connection is a
+ // downstream connection and there are any active streams, the drain sequence will kick-in,
+ // and the connection will be force-closed after the drain period. See :ref:`drain_timeout
+ // `.
+ MaxConnectionDuration *durationpb.Duration `protobuf:"bytes,3,opt,name=max_connection_duration,json=maxConnectionDuration,proto3" json:"max_connection_duration,omitempty"`
+ // The maximum number of headers. If unconfigured, the default
+ // maximum number of request headers allowed is 100. Requests that exceed this limit will receive
+ // a 431 response for HTTP/1.x and cause a stream reset for HTTP/2.
+ MaxHeadersCount *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=max_headers_count,json=maxHeadersCount,proto3" json:"max_headers_count,omitempty"`
+ // Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be
+ // reset independent of any other timeouts. If not specified, this value is not set.
+ MaxStreamDuration *durationpb.Duration `protobuf:"bytes,4,opt,name=max_stream_duration,json=maxStreamDuration,proto3" json:"max_stream_duration,omitempty"`
+ // Action to take when a client request with a header name containing underscore characters is received.
+ // If this setting is not specified, the value defaults to ALLOW.
+ // Note: upstream responses are not affected by this setting.
+ // Note: this only affects client headers. It does not affect headers added
+ // by Envoy filters and does not have any impact if added to cluster config.
+ HeadersWithUnderscoresAction HttpProtocolOptions_HeadersWithUnderscoresAction `protobuf:"varint,5,opt,name=headers_with_underscores_action,json=headersWithUnderscoresAction,proto3,enum=envoy.config.core.v3.HttpProtocolOptions_HeadersWithUnderscoresAction" json:"headers_with_underscores_action,omitempty"`
+ // Optional maximum requests for both upstream and downstream connections.
+ // If not specified, there is no limit.
+ // Setting this parameter to 1 will effectively disable keep alive.
+ // For HTTP/2 and HTTP/3, due to concurrent stream processing, the limit is approximate.
+ MaxRequestsPerConnection *wrapperspb.UInt32Value `protobuf:"bytes,6,opt,name=max_requests_per_connection,json=maxRequestsPerConnection,proto3" json:"max_requests_per_connection,omitempty"`
+}
+
+func (x *HttpProtocolOptions) Reset() {
+ *x = HttpProtocolOptions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HttpProtocolOptions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HttpProtocolOptions) ProtoMessage() {}
+
+func (x *HttpProtocolOptions) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HttpProtocolOptions.ProtoReflect.Descriptor instead.
+func (*HttpProtocolOptions) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *HttpProtocolOptions) GetIdleTimeout() *durationpb.Duration {
+ if x != nil {
+ return x.IdleTimeout
+ }
+ return nil
+}
+
+func (x *HttpProtocolOptions) GetMaxConnectionDuration() *durationpb.Duration {
+ if x != nil {
+ return x.MaxConnectionDuration
+ }
+ return nil
+}
+
+func (x *HttpProtocolOptions) GetMaxHeadersCount() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.MaxHeadersCount
+ }
+ return nil
+}
+
+func (x *HttpProtocolOptions) GetMaxStreamDuration() *durationpb.Duration {
+ if x != nil {
+ return x.MaxStreamDuration
+ }
+ return nil
+}
+
+func (x *HttpProtocolOptions) GetHeadersWithUnderscoresAction() HttpProtocolOptions_HeadersWithUnderscoresAction {
+ if x != nil {
+ return x.HeadersWithUnderscoresAction
+ }
+ return HttpProtocolOptions_ALLOW
+}
+
+func (x *HttpProtocolOptions) GetMaxRequestsPerConnection() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.MaxRequestsPerConnection
+ }
+ return nil
+}
+
+// [#next-free-field: 11]
+type Http1ProtocolOptions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Handle HTTP requests with absolute URLs in the requests. These requests
+ // are generally sent by clients to forward/explicit proxies. This allows clients to configure
+ // envoy as their HTTP proxy. In Unix, for example, this is typically done by setting the
+ // “http_proxy“ environment variable.
+ AllowAbsoluteUrl *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=allow_absolute_url,json=allowAbsoluteUrl,proto3" json:"allow_absolute_url,omitempty"`
+ // Handle incoming HTTP/1.0 and HTTP 0.9 requests.
+ // This is off by default, and not fully standards compliant. There is support for pre-HTTP/1.1
+ // style connect logic, dechunking, and handling lack of client host iff
+ // “default_host_for_http_10“ is configured.
+ AcceptHttp_10 bool `protobuf:"varint,2,opt,name=accept_http_10,json=acceptHttp10,proto3" json:"accept_http_10,omitempty"`
+ // A default host for HTTP/1.0 requests. This is highly suggested if “accept_http_10“ is true as
+ // Envoy does not otherwise support HTTP/1.0 without a Host header.
+ // This is a no-op if “accept_http_10“ is not true.
+ DefaultHostForHttp_10 string `protobuf:"bytes,3,opt,name=default_host_for_http_10,json=defaultHostForHttp10,proto3" json:"default_host_for_http_10,omitempty"`
+ // Describes how the keys for response headers should be formatted. By default, all header keys
+ // are lower cased.
+ HeaderKeyFormat *Http1ProtocolOptions_HeaderKeyFormat `protobuf:"bytes,4,opt,name=header_key_format,json=headerKeyFormat,proto3" json:"header_key_format,omitempty"`
+ // Enables trailers for HTTP/1. By default the HTTP/1 codec drops proxied trailers.
+ //
+ // .. attention::
+ //
+ // Note that this only happens when Envoy is chunk encoding which occurs when:
+ // - The request is HTTP/1.1.
+ // - Is neither a HEAD only request nor a HTTP Upgrade.
+ // - Not a response to a HEAD request.
+ // - The content length header is not present.
+ EnableTrailers bool `protobuf:"varint,5,opt,name=enable_trailers,json=enableTrailers,proto3" json:"enable_trailers,omitempty"`
+ // Allows Envoy to process requests/responses with both “Content-Length“ and “Transfer-Encoding“
+ // headers set. By default such messages are rejected, but if option is enabled - Envoy will
+ // remove Content-Length header and process message.
+ // See `RFC7230, sec. 3.3.3 `_ for details.
+ //
+ // .. attention::
+ //
+ // Enabling this option might lead to request smuggling vulnerability, especially if traffic
+ // is proxied via multiple layers of proxies.
+ //
+ // [#comment:TODO: This field is ignored when the
+ // :ref:`header validation configuration `
+ // is present.]
+ AllowChunkedLength bool `protobuf:"varint,6,opt,name=allow_chunked_length,json=allowChunkedLength,proto3" json:"allow_chunked_length,omitempty"`
+ // Allows invalid HTTP messaging. When this option is false, then Envoy will terminate
+ // HTTP/1.1 connections upon receiving an invalid HTTP message. However,
+ // when this option is true, then Envoy will leave the HTTP/1.1 connection
+ // open where possible.
+ // If set, this overrides any HCM :ref:`stream_error_on_invalid_http_messaging
+ // `.
+ OverrideStreamErrorOnInvalidHttpMessage *wrapperspb.BoolValue `protobuf:"bytes,7,opt,name=override_stream_error_on_invalid_http_message,json=overrideStreamErrorOnInvalidHttpMessage,proto3" json:"override_stream_error_on_invalid_http_message,omitempty"`
+ // Allows sending fully qualified URLs when proxying the first line of the
+ // response. By default, Envoy will only send the path components in the first line.
+ // If this is true, Envoy will create a fully qualified URI composing scheme
+ // (inferred if not present), host (from the host/:authority header) and path
+ // (from first line or :path header).
+ SendFullyQualifiedUrl bool `protobuf:"varint,8,opt,name=send_fully_qualified_url,json=sendFullyQualifiedUrl,proto3" json:"send_fully_qualified_url,omitempty"`
+ // [#not-implemented-hide:] Hiding so that field can be removed after BalsaParser is rolled out.
+ // If set, force HTTP/1 parser: BalsaParser if true, http-parser if false.
+ // If unset, HTTP/1 parser is selected based on
+ // envoy.reloadable_features.http1_use_balsa_parser.
+ // See issue #21245.
+ UseBalsaParser *wrapperspb.BoolValue `protobuf:"bytes,9,opt,name=use_balsa_parser,json=useBalsaParser,proto3" json:"use_balsa_parser,omitempty"`
+ // [#not-implemented-hide:] Hiding so that field can be removed.
+ // If true, and BalsaParser is used (either `use_balsa_parser` above is true,
+ // or `envoy.reloadable_features.http1_use_balsa_parser` is true and
+ // `use_balsa_parser` is unset), then every non-empty method with only valid
+ // characters is accepted. Otherwise, methods not on the hard-coded list are
+ // rejected.
+ // Once UHV is enabled, this field should be removed, and BalsaParser should
+ // allow any method. UHV validates the method, rejecting empty string or
+ // invalid characters, and provides :ref:`restrict_http_methods
+ // `
+ // to reject custom methods.
+ AllowCustomMethods bool `protobuf:"varint,10,opt,name=allow_custom_methods,json=allowCustomMethods,proto3" json:"allow_custom_methods,omitempty"`
+}
+
+// NOTE(review): protoc-generated protobuf bindings (see the protoimpl usage
+// and the file_envoy_config_core_v3_protocol_proto_* descriptor accessors);
+// prefer regenerating from the .proto over hand-editing.
+//
+// Reset stores the zero value into the message and, when the protoimpl unsafe
+// fast path is enabled, re-registers the cached message info.
+func (x *Http1ProtocolOptions) Reset() {
+ *x = Http1ProtocolOptions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+// String renders the message using the standard protobuf text formatter.
+func (x *Http1ProtocolOptions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Http1ProtocolOptions) ProtoMessage() {}
+
+// ProtoReflect returns the reflective view of the message, caching the
+// message info on first use when the unsafe fast path is enabled.
+func (x *Http1ProtocolOptions) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Http1ProtocolOptions.ProtoReflect.Descriptor instead.
+func (*Http1ProtocolOptions) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{6}
+}
+
+// The Get* accessors below are nil-safe: each returns the field value when
+// the receiver is non-nil and the type's zero value otherwise.
+func (x *Http1ProtocolOptions) GetAllowAbsoluteUrl() *wrapperspb.BoolValue {
+ if x != nil {
+ return x.AllowAbsoluteUrl
+ }
+ return nil
+}
+
+func (x *Http1ProtocolOptions) GetAcceptHttp_10() bool {
+ if x != nil {
+ return x.AcceptHttp_10
+ }
+ return false
+}
+
+func (x *Http1ProtocolOptions) GetDefaultHostForHttp_10() string {
+ if x != nil {
+ return x.DefaultHostForHttp_10
+ }
+ return ""
+}
+
+func (x *Http1ProtocolOptions) GetHeaderKeyFormat() *Http1ProtocolOptions_HeaderKeyFormat {
+ if x != nil {
+ return x.HeaderKeyFormat
+ }
+ return nil
+}
+
+func (x *Http1ProtocolOptions) GetEnableTrailers() bool {
+ if x != nil {
+ return x.EnableTrailers
+ }
+ return false
+}
+
+func (x *Http1ProtocolOptions) GetAllowChunkedLength() bool {
+ if x != nil {
+ return x.AllowChunkedLength
+ }
+ return false
+}
+
+func (x *Http1ProtocolOptions) GetOverrideStreamErrorOnInvalidHttpMessage() *wrapperspb.BoolValue {
+ if x != nil {
+ return x.OverrideStreamErrorOnInvalidHttpMessage
+ }
+ return nil
+}
+
+func (x *Http1ProtocolOptions) GetSendFullyQualifiedUrl() bool {
+ if x != nil {
+ return x.SendFullyQualifiedUrl
+ }
+ return false
+}
+
+func (x *Http1ProtocolOptions) GetUseBalsaParser() *wrapperspb.BoolValue {
+ if x != nil {
+ return x.UseBalsaParser
+ }
+ return nil
+}
+
+func (x *Http1ProtocolOptions) GetAllowCustomMethods() bool {
+ if x != nil {
+ return x.AllowCustomMethods
+ }
+ return false
+}
+
+// KeepaliveSettings configures HTTP/2 connection-liveness PING probing: the
+// send interval, the response timeout, interval jitter, and idle-triggered
+// pings ahead of new stream creation.
+type KeepaliveSettings struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Send HTTP/2 PING frames at this period, in order to test that the connection is still alive.
+ // If this is zero, interval PINGs will not be sent.
+ Interval *durationpb.Duration `protobuf:"bytes,1,opt,name=interval,proto3" json:"interval,omitempty"`
+ // How long to wait for a response to a keepalive PING. If a response is not received within this
+ // time period, the connection will be aborted. Note that in order to prevent the influence of
+ // Head-of-line (HOL) blocking the timeout period is extended when *any* frame is received on
+ // the connection, under the assumption that if a frame is received the connection is healthy.
+ Timeout *durationpb.Duration `protobuf:"bytes,2,opt,name=timeout,proto3" json:"timeout,omitempty"`
+ // A random jitter amount as a percentage of interval that will be added to each interval.
+ // A value of zero means there will be no jitter.
+ // The default value is 15%.
+ IntervalJitter *v3.Percent `protobuf:"bytes,3,opt,name=interval_jitter,json=intervalJitter,proto3" json:"interval_jitter,omitempty"`
+ // If the connection has been idle for this duration, send a HTTP/2 ping ahead
+ // of new stream creation, to quickly detect dead connections.
+ // If this is zero, this type of PING will not be sent.
+ // If an interval ping is outstanding, a second ping will not be sent as the
+ // interval ping will determine if the connection is dead.
+ //
+ // NOTE(review): the :ref: target below was stripped in transit and has been
+ // reconstructed — verify against the upstream protocol.proto.
+ // The same feature for HTTP/3 is given by inheritance from QUICHE which uses :ref:`connection idle_timeout <envoy_v3_api_field_config.core.v3.QuicProtocolOptions.idle_timeout>` and the current PTO of the connection to decide whether to probe before sending a new request.
+ ConnectionIdleInterval *durationpb.Duration `protobuf:"bytes,4,opt,name=connection_idle_interval,json=connectionIdleInterval,proto3" json:"connection_idle_interval,omitempty"`
+}
+
+// Reset stores the zero value into the message and, when the protoimpl unsafe
+// fast path is enabled, re-registers the cached message info.
+func (x *KeepaliveSettings) Reset() {
+ *x = KeepaliveSettings{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *KeepaliveSettings) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*KeepaliveSettings) ProtoMessage() {}
+
+func (x *KeepaliveSettings) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use KeepaliveSettings.ProtoReflect.Descriptor instead.
+func (*KeepaliveSettings) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{7}
+}
+
+// The Get* accessors below are nil-safe: each returns the field when the
+// receiver is non-nil and nil otherwise.
+func (x *KeepaliveSettings) GetInterval() *durationpb.Duration {
+ if x != nil {
+ return x.Interval
+ }
+ return nil
+}
+
+func (x *KeepaliveSettings) GetTimeout() *durationpb.Duration {
+ if x != nil {
+ return x.Timeout
+ }
+ return nil
+}
+
+func (x *KeepaliveSettings) GetIntervalJitter() *v3.Percent {
+ if x != nil {
+ return x.IntervalJitter
+ }
+ return nil
+}
+
+func (x *KeepaliveSettings) GetConnectionIdleInterval() *durationpb.Duration {
+ if x != nil {
+ return x.ConnectionIdleInterval
+ }
+ return nil
+}
+
+// Http2ProtocolOptions configures the HTTP/2 codec (HPACK table size, stream
+// limits, flow-control windows, and flood-mitigation thresholds).
+//
+// NOTE(review): several URLs and :ref: targets in the field comments below
+// were stripped in transit; they have been reconstructed and should be
+// verified against the upstream protocol.proto.
+//
+// [#next-free-field: 17]
+type Http2ProtocolOptions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // `Maximum table size <https://httpwg.org/specs/rfc7541.html#rfc.section.4.2>`_
+ // (in octets) that the encoder is permitted to use for the dynamic HPACK table. Valid values
+ // range from 0 to 4294967295 (2^32 - 1) and defaults to 4096. 0 effectively disables header
+ // compression.
+ HpackTableSize *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=hpack_table_size,json=hpackTableSize,proto3" json:"hpack_table_size,omitempty"`
+ // `Maximum concurrent streams <https://httpwg.org/specs/rfc7540.html#rfc.section.5.1.2>`_
+ // allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1)
+ // and defaults to 2147483647.
+ //
+ // For upstream connections, this also limits how many streams Envoy will initiate concurrently
+ // on a single connection. If the limit is reached, Envoy may queue requests or establish
+ // additional connections (as allowed per circuit breaker limits).
+ //
+ // This acts as an upper bound: Envoy will lower the max concurrent streams allowed on a given
+ // connection based on upstream settings. Config dumps will reflect the configured upper bound,
+ // not the per-connection negotiated limits.
+ MaxConcurrentStreams *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=max_concurrent_streams,json=maxConcurrentStreams,proto3" json:"max_concurrent_streams,omitempty"`
+ // `Initial stream-level flow-control window
+ // <https://httpwg.org/specs/rfc7540.html#rfc.section.6.9.2>`_ size. Valid values range from 65535
+ // (2^16 - 1, HTTP/2 default) to 2147483647 (2^31 - 1, HTTP/2 maximum) and defaults to 268435456
+ // (256 * 1024 * 1024).
+ //
+ // NOTE: 65535 is the initial window size from HTTP/2 spec. We only support increasing the default
+ // window size now, so it's also the minimum.
+ //
+ // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the
+ // HTTP/2 codec buffers. Once the buffer reaches this pointer, watermark callbacks will fire to
+ // stop the flow of data to the codec buffers.
+ InitialStreamWindowSize *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=initial_stream_window_size,json=initialStreamWindowSize,proto3" json:"initial_stream_window_size,omitempty"`
+ // Similar to “initial_stream_window_size“, but for connection-level flow-control
+ // window. Currently, this has the same minimum/maximum/default as “initial_stream_window_size“.
+ InitialConnectionWindowSize *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=initial_connection_window_size,json=initialConnectionWindowSize,proto3" json:"initial_connection_window_size,omitempty"`
+ // Allows proxying Websocket and other upgrades over H2 connect.
+ AllowConnect bool `protobuf:"varint,5,opt,name=allow_connect,json=allowConnect,proto3" json:"allow_connect,omitempty"`
+ // [#not-implemented-hide:] Hiding until envoy has full metadata support.
+ // Still under implementation. DO NOT USE.
+ //
+ // Allows metadata. See [metadata
+ // docs](https://github.com/envoyproxy/envoy/blob/main/source/docs/h2_metadata.md) for more
+ // information.
+ AllowMetadata bool `protobuf:"varint,6,opt,name=allow_metadata,json=allowMetadata,proto3" json:"allow_metadata,omitempty"`
+ // Limit the number of pending outbound downstream frames of all types (frames that are waiting to
+ // be written into the socket). Exceeding this limit triggers flood mitigation and connection is
+ // terminated. The “http2.outbound_flood“ stat tracks the number of terminated connections due
+ // to flood mitigation. The default limit is 10000.
+ MaxOutboundFrames *wrapperspb.UInt32Value `protobuf:"bytes,7,opt,name=max_outbound_frames,json=maxOutboundFrames,proto3" json:"max_outbound_frames,omitempty"`
+ // Limit the number of pending outbound downstream frames of types PING, SETTINGS and RST_STREAM,
+ // preventing high memory utilization when receiving continuous stream of these frames. Exceeding
+ // this limit triggers flood mitigation and connection is terminated. The
+ // “http2.outbound_control_flood“ stat tracks the number of terminated connections due to flood
+ // mitigation. The default limit is 1000.
+ MaxOutboundControlFrames *wrapperspb.UInt32Value `protobuf:"bytes,8,opt,name=max_outbound_control_frames,json=maxOutboundControlFrames,proto3" json:"max_outbound_control_frames,omitempty"`
+ // Limit the number of consecutive inbound frames of types HEADERS, CONTINUATION and DATA with an
+ // empty payload and no end stream flag. Those frames have no legitimate use and are abusive, but
+ // might be a result of a broken HTTP/2 implementation. The “http2.inbound_empty_frames_flood“
+ // stat tracks the number of connections terminated due to flood mitigation.
+ // Setting this to 0 will terminate connection upon receiving first frame with an empty payload
+ // and no end stream flag. The default limit is 1.
+ MaxConsecutiveInboundFramesWithEmptyPayload *wrapperspb.UInt32Value `protobuf:"bytes,9,opt,name=max_consecutive_inbound_frames_with_empty_payload,json=maxConsecutiveInboundFramesWithEmptyPayload,proto3" json:"max_consecutive_inbound_frames_with_empty_payload,omitempty"`
+ // Limit the number of inbound PRIORITY frames allowed per each opened stream. If the number
+ // of PRIORITY frames received over the lifetime of connection exceeds the value calculated
+ // using this formula::
+ //
+ // ``max_inbound_priority_frames_per_stream`` * (1 + ``opened_streams``)
+ //
+ // the connection is terminated. For downstream connections the “opened_streams“ is incremented when
+ // Envoy receives complete response headers from the upstream server. For upstream connection the
+ // “opened_streams“ is incremented when Envoy send the HEADERS frame for a new stream. The
+ // “http2.inbound_priority_frames_flood“ stat tracks
+ // the number of connections terminated due to flood mitigation. The default limit is 100.
+ MaxInboundPriorityFramesPerStream *wrapperspb.UInt32Value `protobuf:"bytes,10,opt,name=max_inbound_priority_frames_per_stream,json=maxInboundPriorityFramesPerStream,proto3" json:"max_inbound_priority_frames_per_stream,omitempty"`
+ // Limit the number of inbound WINDOW_UPDATE frames allowed per DATA frame sent. If the number
+ // of WINDOW_UPDATE frames received over the lifetime of connection exceeds the value calculated
+ // using this formula::
+ //
+ // 5 + 2 * (``opened_streams`` +
+ // ``max_inbound_window_update_frames_per_data_frame_sent`` * ``outbound_data_frames``)
+ //
+ // the connection is terminated. For downstream connections the “opened_streams“ is incremented when
+ // Envoy receives complete response headers from the upstream server. For upstream connections the
+ // “opened_streams“ is incremented when Envoy sends the HEADERS frame for a new stream. The
+ // “http2.inbound_priority_frames_flood“ stat tracks the number of connections terminated due to
+ // flood mitigation. The default max_inbound_window_update_frames_per_data_frame_sent value is 10.
+ // Setting this to 1 should be enough to support HTTP/2 implementations with basic flow control,
+ // but more complex implementations that try to estimate available bandwidth require at least 2.
+ MaxInboundWindowUpdateFramesPerDataFrameSent *wrapperspb.UInt32Value `protobuf:"bytes,11,opt,name=max_inbound_window_update_frames_per_data_frame_sent,json=maxInboundWindowUpdateFramesPerDataFrameSent,proto3" json:"max_inbound_window_update_frames_per_data_frame_sent,omitempty"`
+ // Allows invalid HTTP messaging and headers. When this option is disabled (default), then
+ // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However,
+ // when this option is enabled, only the offending stream is terminated.
+ //
+ // This is overridden by HCM :ref:`stream_error_on_invalid_http_messaging
+ // <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_error_on_invalid_http_message>`
+ // iff present.
+ //
+ // This is deprecated in favor of :ref:`override_stream_error_on_invalid_http_message
+ // <envoy_v3_api_field_config.core.v3.Http2ProtocolOptions.override_stream_error_on_invalid_http_message>`
+ //
+ // See `RFC7540, sec. 8.1 <https://datatracker.ietf.org/doc/html/rfc7540#section-8.1>`_ for details.
+ //
+ // Deprecated: Do not use.
+ StreamErrorOnInvalidHttpMessaging bool `protobuf:"varint,12,opt,name=stream_error_on_invalid_http_messaging,json=streamErrorOnInvalidHttpMessaging,proto3" json:"stream_error_on_invalid_http_messaging,omitempty"`
+ // Allows invalid HTTP messaging and headers. When this option is disabled (default), then
+ // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However,
+ // when this option is enabled, only the offending stream is terminated.
+ //
+ // This overrides any HCM :ref:`stream_error_on_invalid_http_messaging
+ // <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_error_on_invalid_http_message>`
+ //
+ // See `RFC7540, sec. 8.1 <https://datatracker.ietf.org/doc/html/rfc7540#section-8.1>`_ for details.
+ OverrideStreamErrorOnInvalidHttpMessage *wrapperspb.BoolValue `protobuf:"bytes,14,opt,name=override_stream_error_on_invalid_http_message,json=overrideStreamErrorOnInvalidHttpMessage,proto3" json:"override_stream_error_on_invalid_http_message,omitempty"`
+ // [#not-implemented-hide:]
+ // Specifies SETTINGS frame parameters to be sent to the peer, with two exceptions:
+ //
+ // 1. SETTINGS_ENABLE_PUSH (0x2) is not configurable as HTTP/2 server push is not supported by
+ // Envoy.
+ //
+ // 2. SETTINGS_ENABLE_CONNECT_PROTOCOL (0x8) is only configurable through the named field
+ // 'allow_connect'.
+ //
+ // Note that custom parameters specified through this field can not also be set in the
+ // corresponding named parameters:
+ //
+ // .. code-block:: text
+ //
+ // ID Field Name
+ // ----------------
+ // 0x1 hpack_table_size
+ // 0x3 max_concurrent_streams
+ // 0x4 initial_stream_window_size
+ //
+ // Collisions will trigger config validation failure on load/update. Likewise, inconsistencies
+ // between custom parameters with the same identifier will trigger a failure.
+ //
+ // See `IANA HTTP/2 Settings
+ // <https://www.iana.org/assignments/http2-parameters/http2-parameters.xhtml#settings>`_ for
+ // standardized identifiers.
+ CustomSettingsParameters []*Http2ProtocolOptions_SettingsParameter `protobuf:"bytes,13,rep,name=custom_settings_parameters,json=customSettingsParameters,proto3" json:"custom_settings_parameters,omitempty"`
+ // Send HTTP/2 PING frames to verify that the connection is still healthy. If the remote peer
+ // does not respond within the configured timeout, the connection will be aborted.
+ ConnectionKeepalive *KeepaliveSettings `protobuf:"bytes,15,opt,name=connection_keepalive,json=connectionKeepalive,proto3" json:"connection_keepalive,omitempty"`
+ // [#not-implemented-hide:] Hiding so that the field can be removed after oghttp2 is rolled out.
+ // If set, force use of a particular HTTP/2 codec: oghttp2 if true, nghttp2 if false.
+ // If unset, HTTP/2 codec is selected based on envoy.reloadable_features.http2_use_oghttp2.
+ UseOghttp2Codec *wrapperspb.BoolValue `protobuf:"bytes,16,opt,name=use_oghttp2_codec,json=useOghttp2Codec,proto3" json:"use_oghttp2_codec,omitempty"`
+}
+
+// Reset stores the zero value into the message and, when the protoimpl unsafe
+// fast path is enabled, re-registers the cached message info.
+func (x *Http2ProtocolOptions) Reset() {
+ *x = Http2ProtocolOptions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Http2ProtocolOptions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Http2ProtocolOptions) ProtoMessage() {}
+
+func (x *Http2ProtocolOptions) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Http2ProtocolOptions.ProtoReflect.Descriptor instead.
+func (*Http2ProtocolOptions) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{8}
+}
+
+// The Get* accessors below are nil-safe: each returns the field value when
+// the receiver is non-nil and the type's zero value otherwise.
+func (x *Http2ProtocolOptions) GetHpackTableSize() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.HpackTableSize
+ }
+ return nil
+}
+
+func (x *Http2ProtocolOptions) GetMaxConcurrentStreams() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.MaxConcurrentStreams
+ }
+ return nil
+}
+
+func (x *Http2ProtocolOptions) GetInitialStreamWindowSize() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.InitialStreamWindowSize
+ }
+ return nil
+}
+
+func (x *Http2ProtocolOptions) GetInitialConnectionWindowSize() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.InitialConnectionWindowSize
+ }
+ return nil
+}
+
+func (x *Http2ProtocolOptions) GetAllowConnect() bool {
+ if x != nil {
+ return x.AllowConnect
+ }
+ return false
+}
+
+func (x *Http2ProtocolOptions) GetAllowMetadata() bool {
+ if x != nil {
+ return x.AllowMetadata
+ }
+ return false
+}
+
+func (x *Http2ProtocolOptions) GetMaxOutboundFrames() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.MaxOutboundFrames
+ }
+ return nil
+}
+
+func (x *Http2ProtocolOptions) GetMaxOutboundControlFrames() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.MaxOutboundControlFrames
+ }
+ return nil
+}
+
+func (x *Http2ProtocolOptions) GetMaxConsecutiveInboundFramesWithEmptyPayload() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.MaxConsecutiveInboundFramesWithEmptyPayload
+ }
+ return nil
+}
+
+func (x *Http2ProtocolOptions) GetMaxInboundPriorityFramesPerStream() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.MaxInboundPriorityFramesPerStream
+ }
+ return nil
+}
+
+func (x *Http2ProtocolOptions) GetMaxInboundWindowUpdateFramesPerDataFrameSent() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.MaxInboundWindowUpdateFramesPerDataFrameSent
+ }
+ return nil
+}
+
+// Deprecated: Do not use.
+func (x *Http2ProtocolOptions) GetStreamErrorOnInvalidHttpMessaging() bool {
+ if x != nil {
+ return x.StreamErrorOnInvalidHttpMessaging
+ }
+ return false
+}
+
+func (x *Http2ProtocolOptions) GetOverrideStreamErrorOnInvalidHttpMessage() *wrapperspb.BoolValue {
+ if x != nil {
+ return x.OverrideStreamErrorOnInvalidHttpMessage
+ }
+ return nil
+}
+
+func (x *Http2ProtocolOptions) GetCustomSettingsParameters() []*Http2ProtocolOptions_SettingsParameter {
+ if x != nil {
+ return x.CustomSettingsParameters
+ }
+ return nil
+}
+
+func (x *Http2ProtocolOptions) GetConnectionKeepalive() *KeepaliveSettings {
+ if x != nil {
+ return x.ConnectionKeepalive
+ }
+ return nil
+}
+
+func (x *Http2ProtocolOptions) GetUseOghttp2Codec() *wrapperspb.BoolValue {
+ if x != nil {
+ return x.UseOghttp2Codec
+ }
+ return nil
+}
+
+// GrpcProtocolOptions wraps the HTTP/2 codec options used for gRPC.
+//
+// [#not-implemented-hide:]
+type GrpcProtocolOptions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // HTTP/2 codec settings applied to gRPC connections.
+ Http2ProtocolOptions *Http2ProtocolOptions `protobuf:"bytes,1,opt,name=http2_protocol_options,json=http2ProtocolOptions,proto3" json:"http2_protocol_options,omitempty"`
+}
+
+// Reset stores the zero value into the message and, when the protoimpl unsafe
+// fast path is enabled, re-registers the cached message info.
+func (x *GrpcProtocolOptions) Reset() {
+ *x = GrpcProtocolOptions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GrpcProtocolOptions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcProtocolOptions) ProtoMessage() {}
+
+func (x *GrpcProtocolOptions) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcProtocolOptions.ProtoReflect.Descriptor instead.
+func (*GrpcProtocolOptions) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{9}
+}
+
+// GetHttp2ProtocolOptions is nil-safe: it returns nil on a nil receiver.
+func (x *GrpcProtocolOptions) GetHttp2ProtocolOptions() *Http2ProtocolOptions {
+ if x != nil {
+ return x.Http2ProtocolOptions
+ }
+ return nil
+}
+
+// A message which allows using HTTP/3.
+//
+// NOTE(review): the URLs and :ref: target in the field comments below were
+// stripped in transit; they have been reconstructed and should be verified
+// against the upstream protocol.proto.
+//
+// [#next-free-field: 6]
+type Http3ProtocolOptions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // QUIC transport-level settings for the HTTP/3 connection.
+ QuicProtocolOptions *QuicProtocolOptions `protobuf:"bytes,1,opt,name=quic_protocol_options,json=quicProtocolOptions,proto3" json:"quic_protocol_options,omitempty"`
+ // Allows invalid HTTP messaging and headers. When this option is disabled (default), then
+ // the whole HTTP/3 connection is terminated upon receiving invalid HEADERS frame. However,
+ // when this option is enabled, only the offending stream is terminated.
+ //
+ // If set, this overrides any HCM :ref:`stream_error_on_invalid_http_messaging
+ // <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_error_on_invalid_http_message>`.
+ OverrideStreamErrorOnInvalidHttpMessage *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=override_stream_error_on_invalid_http_message,json=overrideStreamErrorOnInvalidHttpMessage,proto3" json:"override_stream_error_on_invalid_http_message,omitempty"`
+ // Allows proxying Websocket and other upgrades over HTTP/3 CONNECT using
+ // the header mechanisms from the `HTTP/2 extended connect RFC
+ // <https://datatracker.ietf.org/doc/html/rfc8441>`_
+ // and settings `proposed for HTTP/3
+ // <https://datatracker.ietf.org/doc/html/draft-ietf-httpbis-h3-websockets>`_
+ // Note that HTTP/3 CONNECT is not yet an RFC.
+ AllowExtendedConnect bool `protobuf:"varint,5,opt,name=allow_extended_connect,json=allowExtendedConnect,proto3" json:"allow_extended_connect,omitempty"`
+}
+
+// Reset stores the zero value into the message and, when the protoimpl unsafe
+// fast path is enabled, re-registers the cached message info.
+func (x *Http3ProtocolOptions) Reset() {
+ *x = Http3ProtocolOptions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Http3ProtocolOptions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Http3ProtocolOptions) ProtoMessage() {}
+
+func (x *Http3ProtocolOptions) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Http3ProtocolOptions.ProtoReflect.Descriptor instead.
+func (*Http3ProtocolOptions) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{10}
+}
+
+// The Get* accessors below are nil-safe: each returns the field value when
+// the receiver is non-nil and the type's zero value otherwise.
+func (x *Http3ProtocolOptions) GetQuicProtocolOptions() *QuicProtocolOptions {
+ if x != nil {
+ return x.QuicProtocolOptions
+ }
+ return nil
+}
+
+func (x *Http3ProtocolOptions) GetOverrideStreamErrorOnInvalidHttpMessage() *wrapperspb.BoolValue {
+ if x != nil {
+ return x.OverrideStreamErrorOnInvalidHttpMessage
+ }
+ return nil
+}
+
+func (x *Http3ProtocolOptions) GetAllowExtendedConnect() bool {
+ if x != nil {
+ return x.AllowExtendedConnect
+ }
+ return false
+}
+
+// A message to control transformations to the :scheme header
+type SchemeHeaderTransformation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Transformation:
+ //
+ // *SchemeHeaderTransformation_SchemeToOverwrite
+ Transformation isSchemeHeaderTransformation_Transformation `protobuf_oneof:"transformation"`
+}
+
+// Reset stores the zero value into the message and, when the protoimpl unsafe
+// fast path is enabled, re-registers the cached message info.
+func (x *SchemeHeaderTransformation) Reset() {
+ *x = SchemeHeaderTransformation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SchemeHeaderTransformation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SchemeHeaderTransformation) ProtoMessage() {}
+
+func (x *SchemeHeaderTransformation) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SchemeHeaderTransformation.ProtoReflect.Descriptor instead.
+func (*SchemeHeaderTransformation) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{11}
+}
+
+// GetTransformation returns the populated oneof wrapper, or nil if unset.
+func (m *SchemeHeaderTransformation) GetTransformation() isSchemeHeaderTransformation_Transformation {
+ if m != nil {
+ return m.Transformation
+ }
+ return nil
+}
+
+// GetSchemeToOverwrite returns the overwrite scheme when that oneof branch is
+// set, and the empty string otherwise.
+func (x *SchemeHeaderTransformation) GetSchemeToOverwrite() string {
+ if x, ok := x.GetTransformation().(*SchemeHeaderTransformation_SchemeToOverwrite); ok {
+ return x.SchemeToOverwrite
+ }
+ return ""
+}
+
+// isSchemeHeaderTransformation_Transformation is the closed interface
+// implemented by the oneof wrapper types for the "transformation" field.
+type isSchemeHeaderTransformation_Transformation interface {
+ isSchemeHeaderTransformation_Transformation()
+}
+
+type SchemeHeaderTransformation_SchemeToOverwrite struct {
+ // Overwrite any Scheme header with the contents of this string.
+ SchemeToOverwrite string `protobuf:"bytes,1,opt,name=scheme_to_overwrite,json=schemeToOverwrite,proto3,oneof"`
+}
+
+func (*SchemeHeaderTransformation_SchemeToOverwrite) isSchemeHeaderTransformation_Transformation() {}
+
+// Allows pre-populating the cache with HTTP/3 alternate protocols entries with a 7 day lifetime.
+// This will cause Envoy to attempt HTTP/3 to those upstreams, even if the upstreams have not
+// advertised HTTP/3 support. These entries will be overwritten by alt-svc
+// response headers or cached values.
+// As with regular cached entries, if the origin response would result in clearing an existing
+// alternate protocol cache entry, pre-populated entries will also be cleared.
+// Adding a cache entry with hostname=foo.com port=123 is the equivalent of getting
+// response headers
+// alt-svc: h3=":123"; ma=86400" in a response to a request to foo.com:123
+// (NOTE(review): the alt-svc example above arrived garbled and was repaired;
+// verify the exact quoting against the upstream .proto.)
+type AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The host name for the alternate protocol entry.
+ Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname,omitempty"`
+ // The port for the alternate protocol entry.
+ Port uint32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"`
+}
+
+// Reset stores the zero value into the message and, when the protoimpl unsafe
+// fast path is enabled, re-registers the cached message info.
+func (x *AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) Reset() {
+ *x = AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) ProtoMessage() {}
+
+func (x *AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry.ProtoReflect.Descriptor instead.
+func (*AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{4, 0}
+}
+
+// The Get* accessors below are nil-safe: each returns the field value when
+// the receiver is non-nil and the type's zero value otherwise.
+func (x *AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) GetHostname() string {
+ if x != nil {
+ return x.Hostname
+ }
+ return ""
+}
+
+func (x *AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) GetPort() uint32 {
+ if x != nil {
+ return x.Port
+ }
+ return 0
+}
+
+// [#next-free-field: 9]
+type Http1ProtocolOptions_HeaderKeyFormat struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to HeaderFormat:
+ //
+ // *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_
+ // *Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter
+ HeaderFormat isHttp1ProtocolOptions_HeaderKeyFormat_HeaderFormat `protobuf_oneof:"header_format"`
+}
+
+func (x *Http1ProtocolOptions_HeaderKeyFormat) Reset() {
+ *x = Http1ProtocolOptions_HeaderKeyFormat{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Http1ProtocolOptions_HeaderKeyFormat) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Http1ProtocolOptions_HeaderKeyFormat) ProtoMessage() {}
+
+func (x *Http1ProtocolOptions_HeaderKeyFormat) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Http1ProtocolOptions_HeaderKeyFormat.ProtoReflect.Descriptor instead.
+func (*Http1ProtocolOptions_HeaderKeyFormat) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{6, 0}
+}
+
+// GetHeaderFormat returns the raw header_format oneof wrapper, or nil for a
+// nil receiver or an unset oneof.
+func (m *Http1ProtocolOptions_HeaderKeyFormat) GetHeaderFormat() isHttp1ProtocolOptions_HeaderKeyFormat_HeaderFormat {
+	if m != nil {
+		return m.HeaderFormat
+	}
+	return nil
+}
+
+// GetProperCaseWords returns the proper_case_words variant when that oneof
+// case is set, nil otherwise.
+func (x *Http1ProtocolOptions_HeaderKeyFormat) GetProperCaseWords() *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords {
+	if x, ok := x.GetHeaderFormat().(*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_); ok {
+		return x.ProperCaseWords
+	}
+	return nil
+}
+
+// GetStatefulFormatter returns the stateful_formatter variant when that
+// oneof case is set, nil otherwise.
+func (x *Http1ProtocolOptions_HeaderKeyFormat) GetStatefulFormatter() *TypedExtensionConfig {
+	if x, ok := x.GetHeaderFormat().(*Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter); ok {
+		return x.StatefulFormatter
+	}
+	return nil
+}
+
+// isHttp1ProtocolOptions_HeaderKeyFormat_HeaderFormat is the closed
+// interface implemented by the header_format oneof wrapper types below.
+type isHttp1ProtocolOptions_HeaderKeyFormat_HeaderFormat interface {
+	isHttp1ProtocolOptions_HeaderKeyFormat_HeaderFormat()
+}
+
+// Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_ wraps the
+// proper_case_words oneof case (field 1).
+type Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_ struct {
+	// Formats the header by proper casing words: the first character and any character following
+	// a special character will be capitalized if it's an alpha character. For example,
+	// "content-type" becomes "Content-Type", and "foo$b#$are" becomes "Foo$B#$Are".
+	// Note that while this results in most headers following conventional casing, certain headers
+	// are not covered. For example, the "TE" header will be formatted as "Te".
+	ProperCaseWords *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords `protobuf:"bytes,1,opt,name=proper_case_words,json=properCaseWords,proto3,oneof"`
+}
+
+// Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter wraps the
+// stateful_formatter oneof case (field 8).
+type Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter struct {
+	// Configuration for stateful formatter extensions that allow using received headers to
+	// affect the output of encoding headers. E.g., preserving case during proxying.
+	// [#extension-category: envoy.http.stateful_header_formatters]
+	StatefulFormatter *TypedExtensionConfig `protobuf:"bytes,8,opt,name=stateful_formatter,json=statefulFormatter,proto3,oneof"`
+}
+
+// Marker methods identifying the wrappers as header_format oneof members.
+func (*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_) isHttp1ProtocolOptions_HeaderKeyFormat_HeaderFormat() {
+}
+
+func (*Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter) isHttp1ProtocolOptions_HeaderKeyFormat_HeaderFormat() {
+}
+
+// Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords carries no fields;
+// its presence in the header_format oneof selects proper-case formatting.
+type Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+}
+
+// Reset clears x to its zero value; under the unsafe fast path it also
+// re-registers the cached message info for this type (msgTypes[14]).
+func (x *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) Reset() {
+	*x = Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[14]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+// String renders the message in the protobuf text format.
+func (x *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+// ProtoMessage marks the type as a protobuf message.
+func (*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) ProtoMessage() {}
+
+// ProtoReflect returns the reflective view of the message, lazily storing
+// the cached message info on first use under the unsafe fast path.
+func (x *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) ProtoReflect() protoreflect.Message {
+	mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[14]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords.ProtoReflect.Descriptor instead.
+func (*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) Descriptor() ([]byte, []int) {
+	return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{6, 0, 0}
+}
+
+// Defines a parameter to be sent in the SETTINGS frame.
+// See `RFC7540, sec. 6.5.1 <https://datatracker.ietf.org/doc/html/rfc7540#section-6.5.1>`_ for details.
+type Http2ProtocolOptions_SettingsParameter struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The 16 bit parameter identifier.
+	Identifier *wrapperspb.UInt32Value `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"`
+	// The 32 bit parameter value.
+	Value *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+// Reset clears x to its zero value; under the unsafe fast path it also
+// re-registers the cached message info for this type (msgTypes[15]).
+func (x *Http2ProtocolOptions_SettingsParameter) Reset() {
+	*x = Http2ProtocolOptions_SettingsParameter{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[15]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+// String renders the message in the protobuf text format.
+func (x *Http2ProtocolOptions_SettingsParameter) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+// ProtoMessage marks the type as a protobuf message.
+func (*Http2ProtocolOptions_SettingsParameter) ProtoMessage() {}
+
+// ProtoReflect returns the reflective view of the message, lazily storing
+// the cached message info on first use under the unsafe fast path.
+func (x *Http2ProtocolOptions_SettingsParameter) ProtoReflect() protoreflect.Message {
+	mi := &file_envoy_config_core_v3_protocol_proto_msgTypes[15]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Http2ProtocolOptions_SettingsParameter.ProtoReflect.Descriptor instead.
+func (*Http2ProtocolOptions_SettingsParameter) Descriptor() ([]byte, []int) {
+	return file_envoy_config_core_v3_protocol_proto_rawDescGZIP(), []int{8, 0}
+}
+
+// GetIdentifier returns the identifier field, or nil for a nil receiver.
+func (x *Http2ProtocolOptions_SettingsParameter) GetIdentifier() *wrapperspb.UInt32Value {
+	if x != nil {
+		return x.Identifier
+	}
+	return nil
+}
+
+// GetValue returns the value field, or nil for a nil receiver.
+func (x *Http2ProtocolOptions_SettingsParameter) GetValue() *wrapperspb.UInt32Value {
+	if x != nil {
+		return x.Value
+	}
+	return nil
+}
+
+var File_envoy_config_core_v3_protocol_proto protoreflect.FileDescriptor
+
+var file_envoy_config_core_v3_protocol_proto_rawDesc = []byte{
+ 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x24, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76,
+ 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33,
+ 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f,
+ 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f,
+ 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f,
+ 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f,
+ 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
+ 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22,
+ 0x41, 0x0a, 0x12, 0x54, 0x63, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e,
+ 0x54, 0x63, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x22, 0xb7, 0x01, 0x0a, 0x15, 0x51, 0x75, 0x69, 0x63, 0x4b, 0x65, 0x65, 0x70, 0x41,
+ 0x6c, 0x69, 0x76, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x4a, 0x0a, 0x0c,
+ 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0xfa,
+ 0x42, 0x09, 0xaa, 0x01, 0x06, 0x22, 0x00, 0x32, 0x02, 0x08, 0x01, 0x52, 0x0b, 0x6d, 0x61, 0x78,
+ 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x52, 0x0a, 0x10, 0x69, 0x6e, 0x69, 0x74,
+ 0x69, 0x61, 0x6c, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0xfa,
+ 0x42, 0x09, 0xaa, 0x01, 0x06, 0x22, 0x00, 0x32, 0x02, 0x08, 0x01, 0x52, 0x0f, 0x69, 0x6e, 0x69,
+ 0x74, 0x69, 0x61, 0x6c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, 0xa8, 0x04, 0x0a,
+ 0x13, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5b, 0x0a, 0x16, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x63,
+ 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c,
+ 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x14, 0x6d, 0x61, 0x78,
+ 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d,
+ 0x73, 0x12, 0x67, 0x0a, 0x1a, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x72,
+ 0x65, 0x61, 0x6d, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0x2a, 0x07, 0x18, 0x80, 0x80, 0x80, 0x08, 0x28,
+ 0x01, 0x52, 0x17, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d,
+ 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x6f, 0x0a, 0x1e, 0x69, 0x6e,
+ 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x42, 0x0c, 0xfa, 0x42, 0x09, 0x2a, 0x07, 0x18, 0x80, 0x80, 0x80, 0x0c, 0x28, 0x01, 0x52, 0x1b,
+ 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x7a, 0x0a, 0x26, 0x6e,
+ 0x75, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x74,
+ 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6d, 0x69, 0x67, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49,
+ 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x2a, 0x04,
+ 0x18, 0x05, 0x28, 0x00, 0x52, 0x21, 0x6e, 0x75, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74,
+ 0x73, 0x54, 0x6f, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x50, 0x6f, 0x72, 0x74, 0x4d, 0x69,
+ 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5e, 0x0a, 0x14, 0x63, 0x6f, 0x6e, 0x6e, 0x65,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x51, 0x75, 0x69,
+ 0x63, 0x4b, 0x65, 0x65, 0x70, 0x41, 0x6c, 0x69, 0x76, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e,
+ 0x67, 0x73, 0x52, 0x13, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65,
+ 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x22, 0xe4, 0x01, 0x0a, 0x1b, 0x55, 0x70, 0x73, 0x74,
+ 0x72, 0x65, 0x61, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c,
+ 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x75, 0x74, 0x6f, 0x5f,
+ 0x73, 0x6e, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x61, 0x75, 0x74, 0x6f, 0x53,
+ 0x6e, 0x69, 0x12, 0x2e, 0x0a, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x73, 0x61, 0x6e, 0x5f, 0x76,
+ 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x11, 0x61, 0x75, 0x74, 0x6f, 0x53, 0x61, 0x6e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x44, 0x0a, 0x18, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x61,
+ 0x75, 0x74, 0x6f, 0x5f, 0x73, 0x6e, 0x69, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x01, 0xd0, 0x01,
+ 0x01, 0x52, 0x15, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x41, 0x75, 0x74, 0x6f, 0x53,
+ 0x6e, 0x69, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a,
+ 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x86,
+ 0x04, 0x0a, 0x1e, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46,
+ 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x20, 0x00, 0x52, 0x0a, 0x6d, 0x61, 0x78, 0x45,
+ 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x5f, 0x0a, 0x16, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79,
+ 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x52, 0x13, 0x6b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x53, 0x74, 0x6f, 0x72,
+ 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x84, 0x01, 0x0a, 0x14, 0x70, 0x72, 0x65, 0x70,
+ 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73,
+ 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x51, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c,
+ 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73,
+ 0x43, 0x61, 0x63, 0x68, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x41, 0x6c, 0x74,
+ 0x65, 0x72, 0x6e, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x43,
+ 0x61, 0x63, 0x68, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x70, 0x72, 0x65, 0x70, 0x6f,
+ 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2d,
+ 0x0a, 0x12, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x75, 0x66, 0x66,
+ 0x69, 0x78, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x11, 0x63, 0x61, 0x6e, 0x6f,
+ 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x65, 0x73, 0x1a, 0x68, 0x0a,
+ 0x1c, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+ 0x6f, 0x6c, 0x73, 0x43, 0x61, 0x63, 0x68, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x27, 0x0a,
+ 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x01, 0xd0, 0x01, 0x01, 0x52, 0x08, 0x68, 0x6f,
+ 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0d, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x2a, 0x06, 0x10, 0xff, 0xff, 0x03, 0x20,
+ 0x00, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x22, 0xaf, 0x05, 0x0a, 0x13, 0x48, 0x74, 0x74, 0x70,
+ 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12,
+ 0x3c, 0x0a, 0x0c, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x0b, 0x69, 0x64, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x51, 0x0a,
+ 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x43, 0x6f,
+ 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x51, 0x0a, 0x11, 0x6d, 0x61, 0x78, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f,
+ 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49,
+ 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02,
+ 0x28, 0x01, 0x52, 0x0f, 0x6d, 0x61, 0x78, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x43, 0x6f,
+ 0x75, 0x6e, 0x74, 0x12, 0x49, 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61,
+ 0x6d, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x6d, 0x61, 0x78,
+ 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x8d,
+ 0x01, 0x0a, 0x1f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f,
+ 0x75, 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x5f, 0x61, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x46, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e,
+ 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x57, 0x69, 0x74, 0x68, 0x55,
+ 0x6e, 0x64, 0x65, 0x72, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x1c, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x57, 0x69, 0x74, 0x68, 0x55, 0x6e, 0x64,
+ 0x65, 0x72, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5b,
+ 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x5f, 0x70,
+ 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x50, 0x65,
+ 0x72, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4e, 0x0a, 0x1c, 0x48,
+ 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x57, 0x69, 0x74, 0x68, 0x55, 0x6e, 0x64, 0x65, 0x72, 0x73,
+ 0x63, 0x6f, 0x72, 0x65, 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x09, 0x0a, 0x05, 0x41,
+ 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x4a, 0x45, 0x43, 0x54,
+ 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x52,
+ 0x4f, 0x50, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x3a, 0x2c, 0x9a, 0xc5, 0x88,
+ 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32,
+ 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+ 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x93, 0x09, 0x0a, 0x14, 0x48, 0x74,
+ 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x12, 0x48, 0x0a, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x62, 0x73, 0x6f,
+ 0x6c, 0x75, 0x74, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x61, 0x6c, 0x6c, 0x6f,
+ 0x77, 0x41, 0x62, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x24, 0x0a, 0x0e,
+ 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x31, 0x30, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x48, 0x74, 0x74, 0x70,
+ 0x31, 0x30, 0x12, 0x36, 0x0a, 0x18, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f,
+ 0x73, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x31, 0x30, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73,
+ 0x74, 0x46, 0x6f, 0x72, 0x48, 0x74, 0x74, 0x70, 0x31, 0x30, 0x12, 0x66, 0x0a, 0x11, 0x68, 0x65,
+ 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74,
+ 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61,
+ 0x74, 0x52, 0x0f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d,
+ 0x61, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x74, 0x72, 0x61,
+ 0x69, 0x6c, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x65, 0x6e, 0x61,
+ 0x62, 0x6c, 0x65, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x61,
+ 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x6e,
+ 0x67, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77,
+ 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x65, 0x64, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x7a, 0x0a,
+ 0x2d, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d,
+ 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69,
+ 0x64, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x07,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x52, 0x27, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d,
+ 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, 0x6e, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x74,
+ 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x73, 0x65, 0x6e,
+ 0x64, 0x5f, 0x66, 0x75, 0x6c, 0x6c, 0x79, 0x5f, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65,
+ 0x64, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x73, 0x65, 0x6e,
+ 0x64, 0x46, 0x75, 0x6c, 0x6c, 0x79, 0x51, 0x75, 0x61, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x64, 0x55,
+ 0x72, 0x6c, 0x12, 0x4e, 0x0a, 0x10, 0x75, 0x73, 0x65, 0x5f, 0x62, 0x61, 0x6c, 0x73, 0x61, 0x5f,
+ 0x70, 0x61, 0x72, 0x73, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42,
+ 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02,
+ 0x08, 0x01, 0x52, 0x0e, 0x75, 0x73, 0x65, 0x42, 0x61, 0x6c, 0x73, 0x61, 0x50, 0x61, 0x72, 0x73,
+ 0x65, 0x72, 0x12, 0x3a, 0x0a, 0x14, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x75, 0x73, 0x74,
+ 0x6f, 0x6d, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08,
+ 0x42, 0x08, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52, 0x12, 0x61, 0x6c, 0x6c, 0x6f,
+ 0x77, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x1a, 0x9f,
+ 0x03, 0x0a, 0x0f, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d,
+ 0x61, 0x74, 0x12, 0x78, 0x0a, 0x11, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x73,
+ 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4a, 0x2e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72,
+ 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+ 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72,
+ 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72,
+ 0x43, 0x61, 0x73, 0x65, 0x57, 0x6f, 0x72, 0x64, 0x73, 0x48, 0x00, 0x52, 0x0f, 0x70, 0x72, 0x6f,
+ 0x70, 0x65, 0x72, 0x43, 0x61, 0x73, 0x65, 0x57, 0x6f, 0x72, 0x64, 0x73, 0x12, 0x5b, 0x0a, 0x12,
+ 0x73, 0x74, 0x61, 0x74, 0x65, 0x66, 0x75, 0x6c, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x74,
+ 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e,
+ 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x11, 0x73, 0x74, 0x61, 0x74, 0x65, 0x66, 0x75, 0x6c,
+ 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x74, 0x65, 0x72, 0x1a, 0x60, 0x0a, 0x0f, 0x50, 0x72, 0x6f,
+ 0x70, 0x65, 0x72, 0x43, 0x61, 0x73, 0x65, 0x57, 0x6f, 0x72, 0x64, 0x73, 0x3a, 0x4d, 0x9a, 0xc5,
+ 0x88, 0x1e, 0x48, 0x0a, 0x46, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76,
+ 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64,
+ 0x65, 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x70,
+ 0x65, 0x72, 0x43, 0x61, 0x73, 0x65, 0x57, 0x6f, 0x72, 0x64, 0x73, 0x3a, 0x3d, 0x9a, 0xc5, 0x88,
+ 0x1e, 0x38, 0x0a, 0x36, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32,
+ 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x31, 0x50, 0x72, 0x6f, 0x74, 0x6f,
+ 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65,
+ 0x72, 0x4b, 0x65, 0x79, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x14, 0x0a, 0x0d, 0x68, 0x65,
+ 0x61, 0x64, 0x65, 0x72, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x03, 0xf8, 0x42, 0x01,
+ 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61,
+ 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x31,
+ 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22,
+ 0xc1, 0x02, 0x0a, 0x11, 0x4b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x53, 0x65, 0x74,
+ 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61,
+ 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x42, 0x0c, 0xfa, 0x42, 0x09, 0xaa, 0x01, 0x06, 0x32, 0x04, 0x10, 0xc0, 0x84, 0x3d,
+ 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x43, 0x0a, 0x07, 0x74, 0x69,
+ 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0xaa, 0x01, 0x08, 0x08, 0x01,
+ 0x32, 0x04, 0x10, 0xc0, 0x84, 0x3d, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12,
+ 0x3f, 0x0a, 0x0f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x6a, 0x69, 0x74, 0x74,
+ 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74,
+ 0x52, 0x0e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x4a, 0x69, 0x74, 0x74, 0x65, 0x72,
+ 0x12, 0x61, 0x0a, 0x18, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69,
+ 0x64, 0x6c, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0xfa,
+ 0x42, 0x09, 0xaa, 0x01, 0x06, 0x32, 0x04, 0x10, 0xc0, 0x84, 0x3d, 0x52, 0x16, 0x63, 0x6f, 0x6e,
+ 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x6c, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72,
+ 0x76, 0x61, 0x6c, 0x22, 0xd3, 0x0e, 0x0a, 0x14, 0x48, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f,
+ 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x46, 0x0a, 0x10,
+ 0x68, 0x70, 0x61, 0x63, 0x6b, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x68, 0x70, 0x61, 0x63, 0x6b, 0x54, 0x61, 0x62, 0x6c, 0x65,
+ 0x53, 0x69, 0x7a, 0x65, 0x12, 0x61, 0x0a, 0x16, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x63,
+ 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c,
+ 0x75, 0x65, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x2a, 0x08, 0x18, 0xff, 0xff, 0xff, 0xff, 0x07, 0x28,
+ 0x01, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74,
+ 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x6a, 0x0a, 0x1a, 0x69, 0x6e, 0x69, 0x74, 0x69,
+ 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77,
+ 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49,
+ 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, 0xfa, 0x42, 0x0c, 0x2a, 0x0a,
+ 0x18, 0xff, 0xff, 0xff, 0xff, 0x07, 0x28, 0xff, 0xff, 0x03, 0x52, 0x17, 0x69, 0x6e, 0x69, 0x74,
+ 0x69, 0x61, 0x6c, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x53,
+ 0x69, 0x7a, 0x65, 0x12, 0x72, 0x0a, 0x1e, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x63,
+ 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77,
+ 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49,
+ 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, 0xfa, 0x42, 0x0c, 0x2a, 0x0a,
+ 0x18, 0xff, 0xff, 0xff, 0xff, 0x07, 0x28, 0xff, 0xff, 0x03, 0x52, 0x1b, 0x69, 0x6e, 0x69, 0x74,
+ 0x69, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x69, 0x6e,
+ 0x64, 0x6f, 0x77, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77,
+ 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c,
+ 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x12, 0x25, 0x0a, 0x0e,
+ 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4d, 0x65, 0x74, 0x61, 0x64,
+ 0x61, 0x74, 0x61, 0x12, 0x55, 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x75, 0x74, 0x62, 0x6f,
+ 0x75, 0x6e, 0x64, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07,
+ 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x4f, 0x75, 0x74, 0x62,
+ 0x6f, 0x75, 0x6e, 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x64, 0x0a, 0x1b, 0x6d, 0x61,
+ 0x78, 0x5f, 0x6f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa,
+ 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x4f, 0x75, 0x74, 0x62, 0x6f,
+ 0x75, 0x6e, 0x64, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73,
+ 0x12, 0x84, 0x01, 0x0a, 0x31, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75,
+ 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x66, 0x72, 0x61,
+ 0x6d, 0x65, 0x73, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x70,
+ 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55,
+ 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x2b, 0x6d, 0x61, 0x78, 0x43,
+ 0x6f, 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e,
+ 0x64, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x57, 0x69, 0x74, 0x68, 0x45, 0x6d, 0x70, 0x74, 0x79,
+ 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x6f, 0x0a, 0x26, 0x6d, 0x61, 0x78, 0x5f, 0x69,
+ 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x5f,
+ 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61,
+ 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x21, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e,
+ 0x64, 0x50, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x50,
+ 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x91, 0x01, 0x0a, 0x34, 0x6d, 0x61, 0x78,
+ 0x5f, 0x69, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f,
+ 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x5f, 0x70, 0x65,
+ 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x6e,
+ 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x2a, 0x02, 0x28, 0x01, 0x52, 0x2c,
+ 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77,
+ 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x50, 0x65, 0x72, 0x44,
+ 0x61, 0x74, 0x61, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x65, 0x6e, 0x74, 0x12, 0x5e, 0x0a, 0x26,
+ 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6f, 0x6e, 0x5f,
+ 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, 0x65, 0x73,
+ 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x18, 0x01,
+ 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x52, 0x21, 0x73, 0x74, 0x72, 0x65, 0x61,
+ 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4f, 0x6e, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48,
+ 0x74, 0x74, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x7a, 0x0a, 0x2d,
+ 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f,
+ 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64,
+ 0x5f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x0e, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52,
+ 0x27, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45,
+ 0x72, 0x72, 0x6f, 0x72, 0x4f, 0x6e, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x74, 0x74,
+ 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x7a, 0x0a, 0x1a, 0x63, 0x75, 0x73, 0x74,
+ 0x6f, 0x6d, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x5f, 0x70, 0x61, 0x72, 0x61,
+ 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65,
+ 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f,
+ 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
+ 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x18, 0x63, 0x75, 0x73, 0x74,
+ 0x6f, 0x6d, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65,
+ 0x74, 0x65, 0x72, 0x73, 0x12, 0x5a, 0x0a, 0x14, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x18, 0x0f, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4b, 0x65, 0x65, 0x70, 0x61, 0x6c,
+ 0x69, 0x76, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x13, 0x63, 0x6f, 0x6e,
+ 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65,
+ 0x12, 0x50, 0x0a, 0x11, 0x75, 0x73, 0x65, 0x5f, 0x6f, 0x67, 0x68, 0x74, 0x74, 0x70, 0x32, 0x5f,
+ 0x63, 0x6f, 0x64, 0x65, 0x63, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f,
+ 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08,
+ 0x01, 0x52, 0x0f, 0x75, 0x73, 0x65, 0x4f, 0x67, 0x68, 0x74, 0x74, 0x70, 0x32, 0x43, 0x6f, 0x64,
+ 0x65, 0x63, 0x1a, 0xe5, 0x01, 0x0a, 0x11, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x50,
+ 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x51, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e,
+ 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55,
+ 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x13, 0xfa, 0x42, 0x08, 0x2a,
+ 0x06, 0x18, 0xff, 0xff, 0x03, 0x28, 0x00, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52,
+ 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x3c, 0x0a, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e,
+ 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02,
+ 0x10, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x3f, 0x9a, 0xc5, 0x88, 0x1e, 0x3a,
+ 0x0a, 0x38, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63,
+ 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f,
+ 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
+ 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e,
+ 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e,
+ 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+ 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa5, 0x01, 0x0a, 0x13, 0x47, 0x72,
+ 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x12, 0x60, 0x0a, 0x16, 0x68, 0x74, 0x74, 0x70, 0x32, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x32, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x14, 0x68,
+ 0x74, 0x74, 0x70, 0x32, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x47, 0x72,
+ 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x22, 0xb1, 0x02, 0x0a, 0x14, 0x48, 0x74, 0x74, 0x70, 0x33, 0x50, 0x72, 0x6f, 0x74, 0x6f,
+ 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x15, 0x71, 0x75,
+ 0x69, 0x63, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33,
+ 0x2e, 0x51, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x4f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x71, 0x75, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+ 0x6f, 0x6c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x7a, 0x0a, 0x2d, 0x6f, 0x76, 0x65,
+ 0x72, 0x72, 0x69, 0x64, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x65, 0x72, 0x72,
+ 0x6f, 0x72, 0x5f, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x68, 0x74,
+ 0x74, 0x70, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x27, 0x6f, 0x76,
+ 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x72, 0x72, 0x6f,
+ 0x72, 0x4f, 0x6e, 0x49, 0x6e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x48, 0x74, 0x74, 0x70, 0x4d, 0x65,
+ 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x3e, 0x0a, 0x16, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65,
+ 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x08, 0x42, 0x08, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x52,
+ 0x14, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x43, 0x6f,
+ 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x22, 0x74, 0x0a, 0x1a, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x48,
+ 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, 0x13, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x5f, 0x74, 0x6f,
+ 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x12, 0xfa, 0x42, 0x0f, 0x72, 0x0d, 0x52, 0x04, 0x68, 0x74, 0x74, 0x70, 0x52, 0x05, 0x68,
+ 0x74, 0x74, 0x70, 0x73, 0x48, 0x00, 0x52, 0x11, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x54, 0x6f,
+ 0x4f, 0x76, 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0x10, 0x0a, 0x0e, 0x74, 0x72, 0x61,
+ 0x6e, 0x73, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x81, 0x01, 0x0a, 0x22,
+ 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e,
+ 0x76, 0x33, 0x42, 0x0d, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33,
+ 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62,
+ 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_config_core_v3_protocol_proto_rawDescOnce sync.Once
+ file_envoy_config_core_v3_protocol_proto_rawDescData = file_envoy_config_core_v3_protocol_proto_rawDesc
+)
+
+func file_envoy_config_core_v3_protocol_proto_rawDescGZIP() []byte {
+ file_envoy_config_core_v3_protocol_proto_rawDescOnce.Do(func() {
+ file_envoy_config_core_v3_protocol_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_protocol_proto_rawDescData)
+ })
+ return file_envoy_config_core_v3_protocol_proto_rawDescData
+}
+
+var file_envoy_config_core_v3_protocol_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_envoy_config_core_v3_protocol_proto_msgTypes = make([]protoimpl.MessageInfo, 16)
+var file_envoy_config_core_v3_protocol_proto_goTypes = []interface{}{
+ (HttpProtocolOptions_HeadersWithUnderscoresAction)(0), // 0: envoy.config.core.v3.HttpProtocolOptions.HeadersWithUnderscoresAction
+ (*TcpProtocolOptions)(nil), // 1: envoy.config.core.v3.TcpProtocolOptions
+ (*QuicKeepAliveSettings)(nil), // 2: envoy.config.core.v3.QuicKeepAliveSettings
+ (*QuicProtocolOptions)(nil), // 3: envoy.config.core.v3.QuicProtocolOptions
+ (*UpstreamHttpProtocolOptions)(nil), // 4: envoy.config.core.v3.UpstreamHttpProtocolOptions
+ (*AlternateProtocolsCacheOptions)(nil), // 5: envoy.config.core.v3.AlternateProtocolsCacheOptions
+ (*HttpProtocolOptions)(nil), // 6: envoy.config.core.v3.HttpProtocolOptions
+ (*Http1ProtocolOptions)(nil), // 7: envoy.config.core.v3.Http1ProtocolOptions
+ (*KeepaliveSettings)(nil), // 8: envoy.config.core.v3.KeepaliveSettings
+ (*Http2ProtocolOptions)(nil), // 9: envoy.config.core.v3.Http2ProtocolOptions
+ (*GrpcProtocolOptions)(nil), // 10: envoy.config.core.v3.GrpcProtocolOptions
+ (*Http3ProtocolOptions)(nil), // 11: envoy.config.core.v3.Http3ProtocolOptions
+ (*SchemeHeaderTransformation)(nil), // 12: envoy.config.core.v3.SchemeHeaderTransformation
+ (*AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry)(nil), // 13: envoy.config.core.v3.AlternateProtocolsCacheOptions.AlternateProtocolsCacheEntry
+ (*Http1ProtocolOptions_HeaderKeyFormat)(nil), // 14: envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat
+ (*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords)(nil), // 15: envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.ProperCaseWords
+ (*Http2ProtocolOptions_SettingsParameter)(nil), // 16: envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter
+ (*durationpb.Duration)(nil), // 17: google.protobuf.Duration
+ (*wrapperspb.UInt32Value)(nil), // 18: google.protobuf.UInt32Value
+ (*TypedExtensionConfig)(nil), // 19: envoy.config.core.v3.TypedExtensionConfig
+ (*wrapperspb.BoolValue)(nil), // 20: google.protobuf.BoolValue
+ (*v3.Percent)(nil), // 21: envoy.type.v3.Percent
+}
+var file_envoy_config_core_v3_protocol_proto_depIdxs = []int32{
+ 17, // 0: envoy.config.core.v3.QuicKeepAliveSettings.max_interval:type_name -> google.protobuf.Duration
+ 17, // 1: envoy.config.core.v3.QuicKeepAliveSettings.initial_interval:type_name -> google.protobuf.Duration
+ 18, // 2: envoy.config.core.v3.QuicProtocolOptions.max_concurrent_streams:type_name -> google.protobuf.UInt32Value
+ 18, // 3: envoy.config.core.v3.QuicProtocolOptions.initial_stream_window_size:type_name -> google.protobuf.UInt32Value
+ 18, // 4: envoy.config.core.v3.QuicProtocolOptions.initial_connection_window_size:type_name -> google.protobuf.UInt32Value
+ 18, // 5: envoy.config.core.v3.QuicProtocolOptions.num_timeouts_to_trigger_port_migration:type_name -> google.protobuf.UInt32Value
+ 2, // 6: envoy.config.core.v3.QuicProtocolOptions.connection_keepalive:type_name -> envoy.config.core.v3.QuicKeepAliveSettings
+ 18, // 7: envoy.config.core.v3.AlternateProtocolsCacheOptions.max_entries:type_name -> google.protobuf.UInt32Value
+ 19, // 8: envoy.config.core.v3.AlternateProtocolsCacheOptions.key_value_store_config:type_name -> envoy.config.core.v3.TypedExtensionConfig
+ 13, // 9: envoy.config.core.v3.AlternateProtocolsCacheOptions.prepopulated_entries:type_name -> envoy.config.core.v3.AlternateProtocolsCacheOptions.AlternateProtocolsCacheEntry
+ 17, // 10: envoy.config.core.v3.HttpProtocolOptions.idle_timeout:type_name -> google.protobuf.Duration
+ 17, // 11: envoy.config.core.v3.HttpProtocolOptions.max_connection_duration:type_name -> google.protobuf.Duration
+ 18, // 12: envoy.config.core.v3.HttpProtocolOptions.max_headers_count:type_name -> google.protobuf.UInt32Value
+ 17, // 13: envoy.config.core.v3.HttpProtocolOptions.max_stream_duration:type_name -> google.protobuf.Duration
+ 0, // 14: envoy.config.core.v3.HttpProtocolOptions.headers_with_underscores_action:type_name -> envoy.config.core.v3.HttpProtocolOptions.HeadersWithUnderscoresAction
+ 18, // 15: envoy.config.core.v3.HttpProtocolOptions.max_requests_per_connection:type_name -> google.protobuf.UInt32Value
+ 20, // 16: envoy.config.core.v3.Http1ProtocolOptions.allow_absolute_url:type_name -> google.protobuf.BoolValue
+ 14, // 17: envoy.config.core.v3.Http1ProtocolOptions.header_key_format:type_name -> envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat
+ 20, // 18: envoy.config.core.v3.Http1ProtocolOptions.override_stream_error_on_invalid_http_message:type_name -> google.protobuf.BoolValue
+ 20, // 19: envoy.config.core.v3.Http1ProtocolOptions.use_balsa_parser:type_name -> google.protobuf.BoolValue
+ 17, // 20: envoy.config.core.v3.KeepaliveSettings.interval:type_name -> google.protobuf.Duration
+ 17, // 21: envoy.config.core.v3.KeepaliveSettings.timeout:type_name -> google.protobuf.Duration
+ 21, // 22: envoy.config.core.v3.KeepaliveSettings.interval_jitter:type_name -> envoy.type.v3.Percent
+ 17, // 23: envoy.config.core.v3.KeepaliveSettings.connection_idle_interval:type_name -> google.protobuf.Duration
+ 18, // 24: envoy.config.core.v3.Http2ProtocolOptions.hpack_table_size:type_name -> google.protobuf.UInt32Value
+ 18, // 25: envoy.config.core.v3.Http2ProtocolOptions.max_concurrent_streams:type_name -> google.protobuf.UInt32Value
+ 18, // 26: envoy.config.core.v3.Http2ProtocolOptions.initial_stream_window_size:type_name -> google.protobuf.UInt32Value
+ 18, // 27: envoy.config.core.v3.Http2ProtocolOptions.initial_connection_window_size:type_name -> google.protobuf.UInt32Value
+ 18, // 28: envoy.config.core.v3.Http2ProtocolOptions.max_outbound_frames:type_name -> google.protobuf.UInt32Value
+ 18, // 29: envoy.config.core.v3.Http2ProtocolOptions.max_outbound_control_frames:type_name -> google.protobuf.UInt32Value
+ 18, // 30: envoy.config.core.v3.Http2ProtocolOptions.max_consecutive_inbound_frames_with_empty_payload:type_name -> google.protobuf.UInt32Value
+ 18, // 31: envoy.config.core.v3.Http2ProtocolOptions.max_inbound_priority_frames_per_stream:type_name -> google.protobuf.UInt32Value
+ 18, // 32: envoy.config.core.v3.Http2ProtocolOptions.max_inbound_window_update_frames_per_data_frame_sent:type_name -> google.protobuf.UInt32Value
+ 20, // 33: envoy.config.core.v3.Http2ProtocolOptions.override_stream_error_on_invalid_http_message:type_name -> google.protobuf.BoolValue
+ 16, // 34: envoy.config.core.v3.Http2ProtocolOptions.custom_settings_parameters:type_name -> envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter
+ 8, // 35: envoy.config.core.v3.Http2ProtocolOptions.connection_keepalive:type_name -> envoy.config.core.v3.KeepaliveSettings
+ 20, // 36: envoy.config.core.v3.Http2ProtocolOptions.use_oghttp2_codec:type_name -> google.protobuf.BoolValue
+ 9, // 37: envoy.config.core.v3.GrpcProtocolOptions.http2_protocol_options:type_name -> envoy.config.core.v3.Http2ProtocolOptions
+ 3, // 38: envoy.config.core.v3.Http3ProtocolOptions.quic_protocol_options:type_name -> envoy.config.core.v3.QuicProtocolOptions
+ 20, // 39: envoy.config.core.v3.Http3ProtocolOptions.override_stream_error_on_invalid_http_message:type_name -> google.protobuf.BoolValue
+ 15, // 40: envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.proper_case_words:type_name -> envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.ProperCaseWords
+ 19, // 41: envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.stateful_formatter:type_name -> envoy.config.core.v3.TypedExtensionConfig
+ 18, // 42: envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter.identifier:type_name -> google.protobuf.UInt32Value
+ 18, // 43: envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter.value:type_name -> google.protobuf.UInt32Value
+ 44, // [44:44] is the sub-list for method output_type
+ 44, // [44:44] is the sub-list for method input_type
+ 44, // [44:44] is the sub-list for extension type_name
+ 44, // [44:44] is the sub-list for extension extendee
+ 0, // [0:44] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_core_v3_protocol_proto_init() }
+func file_envoy_config_core_v3_protocol_proto_init() {
+ if File_envoy_config_core_v3_protocol_proto != nil {
+ return
+ }
+ file_envoy_config_core_v3_extension_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_core_v3_protocol_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TcpProtocolOptions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_protocol_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*QuicKeepAliveSettings); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_protocol_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*QuicProtocolOptions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_protocol_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UpstreamHttpProtocolOptions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_protocol_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AlternateProtocolsCacheOptions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_protocol_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HttpProtocolOptions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_protocol_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Http1ProtocolOptions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_protocol_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*KeepaliveSettings); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_protocol_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Http2ProtocolOptions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_protocol_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GrpcProtocolOptions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_protocol_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Http3ProtocolOptions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_protocol_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SchemeHeaderTransformation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_protocol_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_protocol_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Http1ProtocolOptions_HeaderKeyFormat); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_protocol_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_protocol_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Http2ProtocolOptions_SettingsParameter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_envoy_config_core_v3_protocol_proto_msgTypes[11].OneofWrappers = []interface{}{
+ (*SchemeHeaderTransformation_SchemeToOverwrite)(nil),
+ }
+ file_envoy_config_core_v3_protocol_proto_msgTypes[13].OneofWrappers = []interface{}{
+ (*Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_)(nil),
+ (*Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_core_v3_protocol_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 16,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_core_v3_protocol_proto_goTypes,
+ DependencyIndexes: file_envoy_config_core_v3_protocol_proto_depIdxs,
+ EnumInfos: file_envoy_config_core_v3_protocol_proto_enumTypes,
+ MessageInfos: file_envoy_config_core_v3_protocol_proto_msgTypes,
+ }.Build()
+ File_envoy_config_core_v3_protocol_proto = out.File
+ file_envoy_config_core_v3_protocol_proto_rawDesc = nil
+ file_envoy_config_core_v3_protocol_proto_goTypes = nil
+ file_envoy_config_core_v3_protocol_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/protocol.pb.validate.go b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/protocol.pb.validate.go
new file mode 100644
index 000000000..9e9a9155e
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/protocol.pb.validate.go
@@ -0,0 +1,2975 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/core/v3/protocol.proto
+
+package corev3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on TcpProtocolOptions with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *TcpProtocolOptions) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on TcpProtocolOptions with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// TcpProtocolOptionsMultiError, or nil if none found.
+func (m *TcpProtocolOptions) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *TcpProtocolOptions) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(errors) > 0 {
+ return TcpProtocolOptionsMultiError(errors)
+ }
+
+ return nil
+}
+
+// TcpProtocolOptionsMultiError is an error wrapping multiple validation errors
+// returned by TcpProtocolOptions.ValidateAll() if the designated constraints
+// aren't met.
+type TcpProtocolOptionsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m TcpProtocolOptionsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m TcpProtocolOptionsMultiError) AllErrors() []error { return m }
+
+// TcpProtocolOptionsValidationError is the validation error returned by
+// TcpProtocolOptions.Validate if the designated constraints aren't met.
+type TcpProtocolOptionsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e TcpProtocolOptionsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e TcpProtocolOptionsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e TcpProtocolOptionsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e TcpProtocolOptionsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e TcpProtocolOptionsValidationError) ErrorName() string {
+ return "TcpProtocolOptionsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e TcpProtocolOptionsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sTcpProtocolOptions.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = TcpProtocolOptionsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = TcpProtocolOptionsValidationError{}
+
+// Validate checks the field values on QuicKeepAliveSettings with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *QuicKeepAliveSettings) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on QuicKeepAliveSettings with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// QuicKeepAliveSettingsMultiError, or nil if none found.
+func (m *QuicKeepAliveSettings) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *QuicKeepAliveSettings) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if d := m.GetMaxInterval(); d != nil {
+ dur, err := d.AsDuration(), d.CheckValid()
+ if err != nil {
+ err = QuicKeepAliveSettingsValidationError{
+ field: "MaxInterval",
+ reason: "value is not a valid duration",
+ cause: err,
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ } else {
+
+ lte := time.Duration(0*time.Second + 0*time.Nanosecond)
+ gte := time.Duration(1*time.Second + 0*time.Nanosecond)
+
+ if dur > lte && dur < gte {
+ err := QuicKeepAliveSettingsValidationError{
+ field: "MaxInterval",
+ reason: "value must be outside range (0s, 1s)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+ }
+
+ if d := m.GetInitialInterval(); d != nil {
+ dur, err := d.AsDuration(), d.CheckValid()
+ if err != nil {
+ err = QuicKeepAliveSettingsValidationError{
+ field: "InitialInterval",
+ reason: "value is not a valid duration",
+ cause: err,
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ } else {
+
+ lte := time.Duration(0*time.Second + 0*time.Nanosecond)
+ gte := time.Duration(1*time.Second + 0*time.Nanosecond)
+
+ if dur > lte && dur < gte {
+ err := QuicKeepAliveSettingsValidationError{
+ field: "InitialInterval",
+ reason: "value must be outside range (0s, 1s)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+ }
+
+ if len(errors) > 0 {
+ return QuicKeepAliveSettingsMultiError(errors)
+ }
+
+ return nil
+}
+
+// QuicKeepAliveSettingsMultiError is an error wrapping multiple validation
+// errors returned by QuicKeepAliveSettings.ValidateAll() if the designated
+// constraints aren't met.
+type QuicKeepAliveSettingsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m QuicKeepAliveSettingsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m QuicKeepAliveSettingsMultiError) AllErrors() []error { return m }
+
+// QuicKeepAliveSettingsValidationError is the validation error returned by
+// QuicKeepAliveSettings.Validate if the designated constraints aren't met.
+type QuicKeepAliveSettingsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e QuicKeepAliveSettingsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e QuicKeepAliveSettingsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e QuicKeepAliveSettingsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e QuicKeepAliveSettingsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e QuicKeepAliveSettingsValidationError) ErrorName() string {
+ return "QuicKeepAliveSettingsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e QuicKeepAliveSettingsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sQuicKeepAliveSettings.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = QuicKeepAliveSettingsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = QuicKeepAliveSettingsValidationError{}
+
+// Validate checks the field values on QuicProtocolOptions with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *QuicProtocolOptions) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on QuicProtocolOptions with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// QuicProtocolOptionsMultiError, or nil if none found.
+func (m *QuicProtocolOptions) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *QuicProtocolOptions) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if wrapper := m.GetMaxConcurrentStreams(); wrapper != nil {
+
+ if wrapper.GetValue() < 1 {
+ err := QuicProtocolOptionsValidationError{
+ field: "MaxConcurrentStreams",
+ reason: "value must be greater than or equal to 1",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+
+ if wrapper := m.GetInitialStreamWindowSize(); wrapper != nil {
+
+ if val := wrapper.GetValue(); val < 1 || val > 16777216 {
+ err := QuicProtocolOptionsValidationError{
+ field: "InitialStreamWindowSize",
+ reason: "value must be inside range [1, 16777216]",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+
+ if wrapper := m.GetInitialConnectionWindowSize(); wrapper != nil {
+
+ if val := wrapper.GetValue(); val < 1 || val > 25165824 {
+ err := QuicProtocolOptionsValidationError{
+ field: "InitialConnectionWindowSize",
+ reason: "value must be inside range [1, 25165824]",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+
+ if wrapper := m.GetNumTimeoutsToTriggerPortMigration(); wrapper != nil {
+
+ if val := wrapper.GetValue(); val < 0 || val > 5 {
+ err := QuicProtocolOptionsValidationError{
+ field: "NumTimeoutsToTriggerPortMigration",
+ reason: "value must be inside range [0, 5]",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+
+ if all {
+ switch v := interface{}(m.GetConnectionKeepalive()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, QuicProtocolOptionsValidationError{
+ field: "ConnectionKeepalive",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, QuicProtocolOptionsValidationError{
+ field: "ConnectionKeepalive",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetConnectionKeepalive()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return QuicProtocolOptionsValidationError{
+ field: "ConnectionKeepalive",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return QuicProtocolOptionsMultiError(errors)
+ }
+
+ return nil
+}
+
+// QuicProtocolOptionsMultiError is an error wrapping multiple validation
+// errors returned by QuicProtocolOptions.ValidateAll() if the designated
+// constraints aren't met.
+type QuicProtocolOptionsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m QuicProtocolOptionsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m QuicProtocolOptionsMultiError) AllErrors() []error { return m }
+
+// QuicProtocolOptionsValidationError is the validation error returned by
+// QuicProtocolOptions.Validate if the designated constraints aren't met.
+type QuicProtocolOptionsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e QuicProtocolOptionsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e QuicProtocolOptionsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e QuicProtocolOptionsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e QuicProtocolOptionsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e QuicProtocolOptionsValidationError) ErrorName() string {
+ return "QuicProtocolOptionsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e QuicProtocolOptionsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sQuicProtocolOptions.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = QuicProtocolOptionsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = QuicProtocolOptionsValidationError{}
+
+// Validate checks the field values on UpstreamHttpProtocolOptions with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *UpstreamHttpProtocolOptions) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on UpstreamHttpProtocolOptions with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// UpstreamHttpProtocolOptionsMultiError, or nil if none found.
+func (m *UpstreamHttpProtocolOptions) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *UpstreamHttpProtocolOptions) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for AutoSni
+
+ // no validation rules for AutoSanValidation
+
+ if m.GetOverrideAutoSniHeader() != "" {
+
+ if !_UpstreamHttpProtocolOptions_OverrideAutoSniHeader_Pattern.MatchString(m.GetOverrideAutoSniHeader()) {
+ err := UpstreamHttpProtocolOptionsValidationError{
+ field: "OverrideAutoSniHeader",
+ reason: "value does not match regex pattern \"^:?[0-9a-zA-Z!#$%&'*+-.^_|~`]+$\"",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return UpstreamHttpProtocolOptionsMultiError(errors)
+ }
+
+ return nil
+}
+
+// UpstreamHttpProtocolOptionsMultiError is an error wrapping multiple
+// validation errors returned by UpstreamHttpProtocolOptions.ValidateAll() if
+// the designated constraints aren't met.
+type UpstreamHttpProtocolOptionsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m UpstreamHttpProtocolOptionsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m UpstreamHttpProtocolOptionsMultiError) AllErrors() []error { return m }
+
+// UpstreamHttpProtocolOptionsValidationError is the validation error returned
+// by UpstreamHttpProtocolOptions.Validate if the designated constraints
+// aren't met.
+type UpstreamHttpProtocolOptionsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e UpstreamHttpProtocolOptionsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e UpstreamHttpProtocolOptionsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e UpstreamHttpProtocolOptionsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e UpstreamHttpProtocolOptionsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e UpstreamHttpProtocolOptionsValidationError) ErrorName() string {
+ return "UpstreamHttpProtocolOptionsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e UpstreamHttpProtocolOptionsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sUpstreamHttpProtocolOptions.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = UpstreamHttpProtocolOptionsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = UpstreamHttpProtocolOptionsValidationError{}
+
+var _UpstreamHttpProtocolOptions_OverrideAutoSniHeader_Pattern = regexp.MustCompile("^:?[0-9a-zA-Z!#$%&'*+-.^_|~`]+$")
+
+// Validate checks the field values on AlternateProtocolsCacheOptions with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *AlternateProtocolsCacheOptions) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on AlternateProtocolsCacheOptions with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the result is a list of violation errors wrapped in
+// AlternateProtocolsCacheOptionsMultiError, or nil if none found.
+func (m *AlternateProtocolsCacheOptions) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *AlternateProtocolsCacheOptions) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if utf8.RuneCountInString(m.GetName()) < 1 {
+ err := AlternateProtocolsCacheOptionsValidationError{
+ field: "Name",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if wrapper := m.GetMaxEntries(); wrapper != nil {
+
+ if wrapper.GetValue() <= 0 {
+ err := AlternateProtocolsCacheOptionsValidationError{
+ field: "MaxEntries",
+ reason: "value must be greater than 0",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+
+ if all {
+ switch v := interface{}(m.GetKeyValueStoreConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AlternateProtocolsCacheOptionsValidationError{
+ field: "KeyValueStoreConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AlternateProtocolsCacheOptionsValidationError{
+ field: "KeyValueStoreConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetKeyValueStoreConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AlternateProtocolsCacheOptionsValidationError{
+ field: "KeyValueStoreConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ for idx, item := range m.GetPrepopulatedEntries() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AlternateProtocolsCacheOptionsValidationError{
+ field: fmt.Sprintf("PrepopulatedEntries[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AlternateProtocolsCacheOptionsValidationError{
+ field: fmt.Sprintf("PrepopulatedEntries[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AlternateProtocolsCacheOptionsValidationError{
+ field: fmt.Sprintf("PrepopulatedEntries[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return AlternateProtocolsCacheOptionsMultiError(errors)
+ }
+
+ return nil
+}
+
+// AlternateProtocolsCacheOptionsMultiError is an error wrapping multiple
+// validation errors returned by AlternateProtocolsCacheOptions.ValidateAll()
+// if the designated constraints aren't met.
+type AlternateProtocolsCacheOptionsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m AlternateProtocolsCacheOptionsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m AlternateProtocolsCacheOptionsMultiError) AllErrors() []error { return m }
+
+// AlternateProtocolsCacheOptionsValidationError is the validation error
+// returned by AlternateProtocolsCacheOptions.Validate if the designated
+// constraints aren't met.
+type AlternateProtocolsCacheOptionsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e AlternateProtocolsCacheOptionsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e AlternateProtocolsCacheOptionsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e AlternateProtocolsCacheOptionsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e AlternateProtocolsCacheOptionsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e AlternateProtocolsCacheOptionsValidationError) ErrorName() string {
+ return "AlternateProtocolsCacheOptionsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e AlternateProtocolsCacheOptionsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sAlternateProtocolsCacheOptions.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = AlternateProtocolsCacheOptionsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = AlternateProtocolsCacheOptionsValidationError{}
+
+// Validate checks the field values on HttpProtocolOptions with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *HttpProtocolOptions) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on HttpProtocolOptions with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// HttpProtocolOptionsMultiError, or nil if none found.
+func (m *HttpProtocolOptions) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *HttpProtocolOptions) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetIdleTimeout()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HttpProtocolOptionsValidationError{
+ field: "IdleTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HttpProtocolOptionsValidationError{
+ field: "IdleTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetIdleTimeout()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HttpProtocolOptionsValidationError{
+ field: "IdleTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetMaxConnectionDuration()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HttpProtocolOptionsValidationError{
+ field: "MaxConnectionDuration",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HttpProtocolOptionsValidationError{
+ field: "MaxConnectionDuration",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMaxConnectionDuration()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HttpProtocolOptionsValidationError{
+ field: "MaxConnectionDuration",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if wrapper := m.GetMaxHeadersCount(); wrapper != nil {
+
+ if wrapper.GetValue() < 1 {
+ err := HttpProtocolOptionsValidationError{
+ field: "MaxHeadersCount",
+ reason: "value must be greater than or equal to 1",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+
+ if all {
+ switch v := interface{}(m.GetMaxStreamDuration()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HttpProtocolOptionsValidationError{
+ field: "MaxStreamDuration",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HttpProtocolOptionsValidationError{
+ field: "MaxStreamDuration",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMaxStreamDuration()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HttpProtocolOptionsValidationError{
+ field: "MaxStreamDuration",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for HeadersWithUnderscoresAction
+
+ if all {
+ switch v := interface{}(m.GetMaxRequestsPerConnection()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HttpProtocolOptionsValidationError{
+ field: "MaxRequestsPerConnection",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HttpProtocolOptionsValidationError{
+ field: "MaxRequestsPerConnection",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMaxRequestsPerConnection()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HttpProtocolOptionsValidationError{
+ field: "MaxRequestsPerConnection",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return HttpProtocolOptionsMultiError(errors)
+ }
+
+ return nil
+}
+
+// HttpProtocolOptionsMultiError is an error wrapping multiple validation
+// errors returned by HttpProtocolOptions.ValidateAll() if the designated
+// constraints aren't met.
+type HttpProtocolOptionsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m HttpProtocolOptionsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m HttpProtocolOptionsMultiError) AllErrors() []error { return m }
+
+// HttpProtocolOptionsValidationError is the validation error returned by
+// HttpProtocolOptions.Validate if the designated constraints aren't met.
+type HttpProtocolOptionsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HttpProtocolOptionsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HttpProtocolOptionsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HttpProtocolOptionsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HttpProtocolOptionsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HttpProtocolOptionsValidationError) ErrorName() string {
+ return "HttpProtocolOptionsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e HttpProtocolOptionsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHttpProtocolOptions.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HttpProtocolOptionsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HttpProtocolOptionsValidationError{}
+
+// Validate checks the field values on Http1ProtocolOptions with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *Http1ProtocolOptions) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Http1ProtocolOptions with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// Http1ProtocolOptionsMultiError, or nil if none found.
+func (m *Http1ProtocolOptions) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Http1ProtocolOptions) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetAllowAbsoluteUrl()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Http1ProtocolOptionsValidationError{
+ field: "AllowAbsoluteUrl",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Http1ProtocolOptionsValidationError{
+ field: "AllowAbsoluteUrl",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetAllowAbsoluteUrl()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Http1ProtocolOptionsValidationError{
+ field: "AllowAbsoluteUrl",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for AcceptHttp_10
+
+ // no validation rules for DefaultHostForHttp_10
+
+ if all {
+ switch v := interface{}(m.GetHeaderKeyFormat()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Http1ProtocolOptionsValidationError{
+ field: "HeaderKeyFormat",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Http1ProtocolOptionsValidationError{
+ field: "HeaderKeyFormat",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetHeaderKeyFormat()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Http1ProtocolOptionsValidationError{
+ field: "HeaderKeyFormat",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for EnableTrailers
+
+ // no validation rules for AllowChunkedLength
+
+ if all {
+ switch v := interface{}(m.GetOverrideStreamErrorOnInvalidHttpMessage()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Http1ProtocolOptionsValidationError{
+ field: "OverrideStreamErrorOnInvalidHttpMessage",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Http1ProtocolOptionsValidationError{
+ field: "OverrideStreamErrorOnInvalidHttpMessage",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetOverrideStreamErrorOnInvalidHttpMessage()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Http1ProtocolOptionsValidationError{
+ field: "OverrideStreamErrorOnInvalidHttpMessage",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for SendFullyQualifiedUrl
+
+ if all {
+ switch v := interface{}(m.GetUseBalsaParser()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Http1ProtocolOptionsValidationError{
+ field: "UseBalsaParser",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Http1ProtocolOptionsValidationError{
+ field: "UseBalsaParser",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetUseBalsaParser()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Http1ProtocolOptionsValidationError{
+ field: "UseBalsaParser",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for AllowCustomMethods
+
+ if len(errors) > 0 {
+ return Http1ProtocolOptionsMultiError(errors)
+ }
+
+ return nil
+}
+
+// Http1ProtocolOptionsMultiError is an error wrapping multiple validation
+// errors returned by Http1ProtocolOptions.ValidateAll() if the designated
+// constraints aren't met.
+type Http1ProtocolOptionsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Http1ProtocolOptionsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Http1ProtocolOptionsMultiError) AllErrors() []error { return m }
+
+// Http1ProtocolOptionsValidationError is the validation error returned by
+// Http1ProtocolOptions.Validate if the designated constraints aren't met.
+type Http1ProtocolOptionsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Http1ProtocolOptionsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Http1ProtocolOptionsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Http1ProtocolOptionsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Http1ProtocolOptionsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Http1ProtocolOptionsValidationError) ErrorName() string {
+ return "Http1ProtocolOptionsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Http1ProtocolOptionsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHttp1ProtocolOptions.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Http1ProtocolOptionsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Http1ProtocolOptionsValidationError{}
+
+// Validate checks the field values on KeepaliveSettings with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *KeepaliveSettings) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on KeepaliveSettings with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// KeepaliveSettingsMultiError, or nil if none found.
+func (m *KeepaliveSettings) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *KeepaliveSettings) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if d := m.GetInterval(); d != nil {
+ dur, err := d.AsDuration(), d.CheckValid()
+ if err != nil {
+ err = KeepaliveSettingsValidationError{
+ field: "Interval",
+ reason: "value is not a valid duration",
+ cause: err,
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ } else {
+
+ gte := time.Duration(0*time.Second + 1000000*time.Nanosecond)
+
+ if dur < gte {
+ err := KeepaliveSettingsValidationError{
+ field: "Interval",
+ reason: "value must be greater than or equal to 1ms",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+ }
+
+ if m.GetTimeout() == nil {
+ err := KeepaliveSettingsValidationError{
+ field: "Timeout",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if d := m.GetTimeout(); d != nil {
+ dur, err := d.AsDuration(), d.CheckValid()
+ if err != nil {
+ err = KeepaliveSettingsValidationError{
+ field: "Timeout",
+ reason: "value is not a valid duration",
+ cause: err,
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ } else {
+
+ gte := time.Duration(0*time.Second + 1000000*time.Nanosecond)
+
+ if dur < gte {
+ err := KeepaliveSettingsValidationError{
+ field: "Timeout",
+ reason: "value must be greater than or equal to 1ms",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetIntervalJitter()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, KeepaliveSettingsValidationError{
+ field: "IntervalJitter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, KeepaliveSettingsValidationError{
+ field: "IntervalJitter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetIntervalJitter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return KeepaliveSettingsValidationError{
+ field: "IntervalJitter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if d := m.GetConnectionIdleInterval(); d != nil {
+ dur, err := d.AsDuration(), d.CheckValid()
+ if err != nil {
+ err = KeepaliveSettingsValidationError{
+ field: "ConnectionIdleInterval",
+ reason: "value is not a valid duration",
+ cause: err,
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ } else {
+
+ gte := time.Duration(0*time.Second + 1000000*time.Nanosecond)
+
+ if dur < gte {
+ err := KeepaliveSettingsValidationError{
+ field: "ConnectionIdleInterval",
+ reason: "value must be greater than or equal to 1ms",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+ }
+
+ if len(errors) > 0 {
+ return KeepaliveSettingsMultiError(errors)
+ }
+
+ return nil
+}
+
+// KeepaliveSettingsMultiError is an error wrapping multiple validation errors
+// returned by KeepaliveSettings.ValidateAll() if the designated constraints
+// aren't met.
+type KeepaliveSettingsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m KeepaliveSettingsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m KeepaliveSettingsMultiError) AllErrors() []error { return m }
+
+// KeepaliveSettingsValidationError is the validation error returned by
+// KeepaliveSettings.Validate if the designated constraints aren't met.
+type KeepaliveSettingsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e KeepaliveSettingsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e KeepaliveSettingsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e KeepaliveSettingsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e KeepaliveSettingsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e KeepaliveSettingsValidationError) ErrorName() string {
+ return "KeepaliveSettingsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e KeepaliveSettingsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sKeepaliveSettings.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = KeepaliveSettingsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = KeepaliveSettingsValidationError{}
+
+// Validate checks the field values on Http2ProtocolOptions with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *Http2ProtocolOptions) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Http2ProtocolOptions with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// Http2ProtocolOptionsMultiError, or nil if none found.
+func (m *Http2ProtocolOptions) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Http2ProtocolOptions) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetHpackTableSize()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Http2ProtocolOptionsValidationError{
+ field: "HpackTableSize",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Http2ProtocolOptionsValidationError{
+ field: "HpackTableSize",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetHpackTableSize()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Http2ProtocolOptionsValidationError{
+ field: "HpackTableSize",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if wrapper := m.GetMaxConcurrentStreams(); wrapper != nil {
+
+ if val := wrapper.GetValue(); val < 1 || val > 2147483647 {
+ err := Http2ProtocolOptionsValidationError{
+ field: "MaxConcurrentStreams",
+ reason: "value must be inside range [1, 2147483647]",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+
+ if wrapper := m.GetInitialStreamWindowSize(); wrapper != nil {
+
+ if val := wrapper.GetValue(); val < 65535 || val > 2147483647 {
+ err := Http2ProtocolOptionsValidationError{
+ field: "InitialStreamWindowSize",
+ reason: "value must be inside range [65535, 2147483647]",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+
+ if wrapper := m.GetInitialConnectionWindowSize(); wrapper != nil {
+
+ if val := wrapper.GetValue(); val < 65535 || val > 2147483647 {
+ err := Http2ProtocolOptionsValidationError{
+ field: "InitialConnectionWindowSize",
+ reason: "value must be inside range [65535, 2147483647]",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+
+ // no validation rules for AllowConnect
+
+ // no validation rules for AllowMetadata
+
+ if wrapper := m.GetMaxOutboundFrames(); wrapper != nil {
+
+ if wrapper.GetValue() < 1 {
+ err := Http2ProtocolOptionsValidationError{
+ field: "MaxOutboundFrames",
+ reason: "value must be greater than or equal to 1",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+
+ if wrapper := m.GetMaxOutboundControlFrames(); wrapper != nil {
+
+ if wrapper.GetValue() < 1 {
+ err := Http2ProtocolOptionsValidationError{
+ field: "MaxOutboundControlFrames",
+ reason: "value must be greater than or equal to 1",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+
+ if all {
+ switch v := interface{}(m.GetMaxConsecutiveInboundFramesWithEmptyPayload()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Http2ProtocolOptionsValidationError{
+ field: "MaxConsecutiveInboundFramesWithEmptyPayload",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Http2ProtocolOptionsValidationError{
+ field: "MaxConsecutiveInboundFramesWithEmptyPayload",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMaxConsecutiveInboundFramesWithEmptyPayload()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Http2ProtocolOptionsValidationError{
+ field: "MaxConsecutiveInboundFramesWithEmptyPayload",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetMaxInboundPriorityFramesPerStream()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Http2ProtocolOptionsValidationError{
+ field: "MaxInboundPriorityFramesPerStream",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Http2ProtocolOptionsValidationError{
+ field: "MaxInboundPriorityFramesPerStream",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMaxInboundPriorityFramesPerStream()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Http2ProtocolOptionsValidationError{
+ field: "MaxInboundPriorityFramesPerStream",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if wrapper := m.GetMaxInboundWindowUpdateFramesPerDataFrameSent(); wrapper != nil {
+
+ if wrapper.GetValue() < 1 {
+ err := Http2ProtocolOptionsValidationError{
+ field: "MaxInboundWindowUpdateFramesPerDataFrameSent",
+ reason: "value must be greater than or equal to 1",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+
+ // no validation rules for StreamErrorOnInvalidHttpMessaging
+
+ if all {
+ switch v := interface{}(m.GetOverrideStreamErrorOnInvalidHttpMessage()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Http2ProtocolOptionsValidationError{
+ field: "OverrideStreamErrorOnInvalidHttpMessage",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Http2ProtocolOptionsValidationError{
+ field: "OverrideStreamErrorOnInvalidHttpMessage",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetOverrideStreamErrorOnInvalidHttpMessage()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Http2ProtocolOptionsValidationError{
+ field: "OverrideStreamErrorOnInvalidHttpMessage",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ for idx, item := range m.GetCustomSettingsParameters() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Http2ProtocolOptionsValidationError{
+ field: fmt.Sprintf("CustomSettingsParameters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Http2ProtocolOptionsValidationError{
+ field: fmt.Sprintf("CustomSettingsParameters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Http2ProtocolOptionsValidationError{
+ field: fmt.Sprintf("CustomSettingsParameters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if all {
+ switch v := interface{}(m.GetConnectionKeepalive()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Http2ProtocolOptionsValidationError{
+ field: "ConnectionKeepalive",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Http2ProtocolOptionsValidationError{
+ field: "ConnectionKeepalive",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetConnectionKeepalive()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Http2ProtocolOptionsValidationError{
+ field: "ConnectionKeepalive",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetUseOghttp2Codec()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Http2ProtocolOptionsValidationError{
+ field: "UseOghttp2Codec",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Http2ProtocolOptionsValidationError{
+ field: "UseOghttp2Codec",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetUseOghttp2Codec()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Http2ProtocolOptionsValidationError{
+ field: "UseOghttp2Codec",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return Http2ProtocolOptionsMultiError(errors)
+ }
+
+ return nil
+}
+
+// Http2ProtocolOptionsMultiError is an error wrapping multiple validation
+// errors returned by Http2ProtocolOptions.ValidateAll() if the designated
+// constraints aren't met.
+type Http2ProtocolOptionsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Http2ProtocolOptionsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Http2ProtocolOptionsMultiError) AllErrors() []error { return m }
+
+// Http2ProtocolOptionsValidationError is the validation error returned by
+// Http2ProtocolOptions.Validate if the designated constraints aren't met.
+type Http2ProtocolOptionsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Http2ProtocolOptionsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Http2ProtocolOptionsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Http2ProtocolOptionsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Http2ProtocolOptionsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Http2ProtocolOptionsValidationError) ErrorName() string {
+ return "Http2ProtocolOptionsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Http2ProtocolOptionsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHttp2ProtocolOptions.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Http2ProtocolOptionsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Http2ProtocolOptionsValidationError{}
+
+// Validate checks the field values on GrpcProtocolOptions with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *GrpcProtocolOptions) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on GrpcProtocolOptions with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// GrpcProtocolOptionsMultiError, or nil if none found.
+func (m *GrpcProtocolOptions) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *GrpcProtocolOptions) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetHttp2ProtocolOptions()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, GrpcProtocolOptionsValidationError{
+ field: "Http2ProtocolOptions",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, GrpcProtocolOptionsValidationError{
+ field: "Http2ProtocolOptions",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetHttp2ProtocolOptions()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return GrpcProtocolOptionsValidationError{
+ field: "Http2ProtocolOptions",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return GrpcProtocolOptionsMultiError(errors)
+ }
+
+ return nil
+}
+
+// GrpcProtocolOptionsMultiError is an error wrapping multiple validation
+// errors returned by GrpcProtocolOptions.ValidateAll() if the designated
+// constraints aren't met.
+type GrpcProtocolOptionsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m GrpcProtocolOptionsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m GrpcProtocolOptionsMultiError) AllErrors() []error { return m }
+
+// GrpcProtocolOptionsValidationError is the validation error returned by
+// GrpcProtocolOptions.Validate if the designated constraints aren't met.
+type GrpcProtocolOptionsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e GrpcProtocolOptionsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e GrpcProtocolOptionsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e GrpcProtocolOptionsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e GrpcProtocolOptionsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e GrpcProtocolOptionsValidationError) ErrorName() string {
+ return "GrpcProtocolOptionsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e GrpcProtocolOptionsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sGrpcProtocolOptions.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = GrpcProtocolOptionsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = GrpcProtocolOptionsValidationError{}
+
+// Validate checks the field values on Http3ProtocolOptions with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *Http3ProtocolOptions) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Http3ProtocolOptions with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// Http3ProtocolOptionsMultiError, or nil if none found.
+func (m *Http3ProtocolOptions) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Http3ProtocolOptions) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetQuicProtocolOptions()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Http3ProtocolOptionsValidationError{
+ field: "QuicProtocolOptions",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Http3ProtocolOptionsValidationError{
+ field: "QuicProtocolOptions",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetQuicProtocolOptions()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Http3ProtocolOptionsValidationError{
+ field: "QuicProtocolOptions",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetOverrideStreamErrorOnInvalidHttpMessage()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Http3ProtocolOptionsValidationError{
+ field: "OverrideStreamErrorOnInvalidHttpMessage",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Http3ProtocolOptionsValidationError{
+ field: "OverrideStreamErrorOnInvalidHttpMessage",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetOverrideStreamErrorOnInvalidHttpMessage()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Http3ProtocolOptionsValidationError{
+ field: "OverrideStreamErrorOnInvalidHttpMessage",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for AllowExtendedConnect
+
+ if len(errors) > 0 {
+ return Http3ProtocolOptionsMultiError(errors)
+ }
+
+ return nil
+}
+
+// Http3ProtocolOptionsMultiError is an error wrapping multiple validation
+// errors returned by Http3ProtocolOptions.ValidateAll() if the designated
+// constraints aren't met.
+type Http3ProtocolOptionsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Http3ProtocolOptionsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Http3ProtocolOptionsMultiError) AllErrors() []error { return m }
+
+// Http3ProtocolOptionsValidationError is the validation error returned by
+// Http3ProtocolOptions.Validate if the designated constraints aren't met.
+type Http3ProtocolOptionsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Http3ProtocolOptionsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Http3ProtocolOptionsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Http3ProtocolOptionsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Http3ProtocolOptionsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Http3ProtocolOptionsValidationError) ErrorName() string {
+ return "Http3ProtocolOptionsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Http3ProtocolOptionsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHttp3ProtocolOptions.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Http3ProtocolOptionsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Http3ProtocolOptionsValidationError{}
+
+// Validate checks the field values on SchemeHeaderTransformation with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *SchemeHeaderTransformation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on SchemeHeaderTransformation with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// SchemeHeaderTransformationMultiError, or nil if none found.
+func (m *SchemeHeaderTransformation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *SchemeHeaderTransformation) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ switch v := m.Transformation.(type) {
+ case *SchemeHeaderTransformation_SchemeToOverwrite:
+ if v == nil {
+ err := SchemeHeaderTransformationValidationError{
+ field: "Transformation",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if _, ok := _SchemeHeaderTransformation_SchemeToOverwrite_InLookup[m.GetSchemeToOverwrite()]; !ok {
+ err := SchemeHeaderTransformationValidationError{
+ field: "SchemeToOverwrite",
+ reason: "value must be in list [http https]",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+
+ if len(errors) > 0 {
+ return SchemeHeaderTransformationMultiError(errors)
+ }
+
+ return nil
+}
+
+// SchemeHeaderTransformationMultiError is an error wrapping multiple
+// validation errors returned by SchemeHeaderTransformation.ValidateAll() if
+// the designated constraints aren't met.
+type SchemeHeaderTransformationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m SchemeHeaderTransformationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m SchemeHeaderTransformationMultiError) AllErrors() []error { return m }
+
+// SchemeHeaderTransformationValidationError is the validation error returned
+// by SchemeHeaderTransformation.Validate if the designated constraints aren't met.
+type SchemeHeaderTransformationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e SchemeHeaderTransformationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e SchemeHeaderTransformationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e SchemeHeaderTransformationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e SchemeHeaderTransformationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e SchemeHeaderTransformationValidationError) ErrorName() string {
+ return "SchemeHeaderTransformationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e SchemeHeaderTransformationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sSchemeHeaderTransformation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = SchemeHeaderTransformationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = SchemeHeaderTransformationValidationError{}
+
+var _SchemeHeaderTransformation_SchemeToOverwrite_InLookup = map[string]struct{}{
+ "http": {},
+ "https": {},
+}
+
+// Validate checks the field values on
+// AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on
+// AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryMultiError, or
+// nil if none found.
+func (m *AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if m.GetHostname() != "" {
+
+ if !_AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry_Hostname_Pattern.MatchString(m.GetHostname()) {
+ err := AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError{
+ field: "Hostname",
+ reason: "value does not match regex pattern \"^:?[0-9a-zA-Z!#$%&'*+-.^_|~`]+$\"",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+
+ if val := m.GetPort(); val <= 0 || val >= 65535 {
+ err := AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError{
+ field: "Port",
+ reason: "value must be inside range (0, 65535)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryMultiError(errors)
+ }
+
+ return nil
+}
+
+// AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryMultiError is an
+// error wrapping multiple validation errors returned by
+// AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry.ValidateAll()
+// if the designated constraints aren't met.
+type AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryMultiError) AllErrors() []error {
+ return m
+}
+
+// AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError
+// is the validation error returned by
+// AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry.Validate if the
+// designated constraints aren't met.
+type AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError) Field() string {
+ return e.field
+}
+
+// Reason function returns reason value.
+func (e AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError) Reason() string {
+ return e.reason
+}
+
+// Cause function returns cause value.
+func (e AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError) Cause() error {
+ return e.cause
+}
+
+// Key function returns key value.
+func (e AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError) Key() bool {
+ return e.key
+}
+
+// ErrorName returns error name.
+func (e AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError) ErrorName() string {
+ return "AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sAlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntryValidationError{}
+
+var _AlternateProtocolsCacheOptions_AlternateProtocolsCacheEntry_Hostname_Pattern = regexp.MustCompile("^:?[0-9a-zA-Z!#$%&'*+-.^_|~`]+$")
+
+// Validate checks the field values on Http1ProtocolOptions_HeaderKeyFormat
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, the first error encountered is returned, or nil if
+// there are no violations.
+func (m *Http1ProtocolOptions_HeaderKeyFormat) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Http1ProtocolOptions_HeaderKeyFormat
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, the result is a list of violation errors wrapped in
+// Http1ProtocolOptions_HeaderKeyFormatMultiError, or nil if none found.
+func (m *Http1ProtocolOptions_HeaderKeyFormat) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Http1ProtocolOptions_HeaderKeyFormat) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ oneofHeaderFormatPresent := false
+ switch v := m.HeaderFormat.(type) {
+ case *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords_:
+ if v == nil {
+ err := Http1ProtocolOptions_HeaderKeyFormatValidationError{
+ field: "HeaderFormat",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofHeaderFormatPresent = true
+
+ if all {
+ switch v := interface{}(m.GetProperCaseWords()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Http1ProtocolOptions_HeaderKeyFormatValidationError{
+ field: "ProperCaseWords",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Http1ProtocolOptions_HeaderKeyFormatValidationError{
+ field: "ProperCaseWords",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetProperCaseWords()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Http1ProtocolOptions_HeaderKeyFormatValidationError{
+ field: "ProperCaseWords",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *Http1ProtocolOptions_HeaderKeyFormat_StatefulFormatter:
+ if v == nil {
+ err := Http1ProtocolOptions_HeaderKeyFormatValidationError{
+ field: "HeaderFormat",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofHeaderFormatPresent = true
+
+ if all {
+ switch v := interface{}(m.GetStatefulFormatter()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Http1ProtocolOptions_HeaderKeyFormatValidationError{
+ field: "StatefulFormatter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Http1ProtocolOptions_HeaderKeyFormatValidationError{
+ field: "StatefulFormatter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetStatefulFormatter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Http1ProtocolOptions_HeaderKeyFormatValidationError{
+ field: "StatefulFormatter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofHeaderFormatPresent {
+ err := Http1ProtocolOptions_HeaderKeyFormatValidationError{
+ field: "HeaderFormat",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return Http1ProtocolOptions_HeaderKeyFormatMultiError(errors)
+ }
+
+ return nil
+}
+
+// Http1ProtocolOptions_HeaderKeyFormatMultiError is an error wrapping multiple
+// validation errors returned by
+// Http1ProtocolOptions_HeaderKeyFormat.ValidateAll() if the designated
+// constraints aren't met.
+type Http1ProtocolOptions_HeaderKeyFormatMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Http1ProtocolOptions_HeaderKeyFormatMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Http1ProtocolOptions_HeaderKeyFormatMultiError) AllErrors() []error { return m }
+
+// Http1ProtocolOptions_HeaderKeyFormatValidationError is the validation error
+// returned by Http1ProtocolOptions_HeaderKeyFormat.Validate if the designated
+// constraints aren't met.
+type Http1ProtocolOptions_HeaderKeyFormatValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Http1ProtocolOptions_HeaderKeyFormatValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Http1ProtocolOptions_HeaderKeyFormatValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Http1ProtocolOptions_HeaderKeyFormatValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Http1ProtocolOptions_HeaderKeyFormatValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Http1ProtocolOptions_HeaderKeyFormatValidationError) ErrorName() string {
+ return "Http1ProtocolOptions_HeaderKeyFormatValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Http1ProtocolOptions_HeaderKeyFormatValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHttp1ProtocolOptions_HeaderKeyFormat.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Http1ProtocolOptions_HeaderKeyFormatValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Http1ProtocolOptions_HeaderKeyFormatValidationError{}
+
+// Validate checks the field values on
+// Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on
+// Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in
+// Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsMultiError, or nil if
+// none found.
+func (m *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(errors) > 0 {
+ return Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsMultiError(errors)
+ }
+
+ return nil
+}
+
+// Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsMultiError is an error
+// wrapping multiple validation errors returned by
+// Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords.ValidateAll() if the
+// designated constraints aren't met.
+type Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsMultiError) AllErrors() []error { return m }
+
+// Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError is the
+// validation error returned by
+// Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWords.Validate if the
+// designated constraints aren't met.
+type Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError) Field() string {
+ return e.field
+}
+
+// Reason function returns reason value.
+func (e Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError) Reason() string {
+ return e.reason
+}
+
+// Cause function returns cause value.
+func (e Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError) Cause() error {
+ return e.cause
+}
+
+// Key function returns key value.
+func (e Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError) ErrorName() string {
+ return "Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHttp1ProtocolOptions_HeaderKeyFormat_ProperCaseWords.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Http1ProtocolOptions_HeaderKeyFormat_ProperCaseWordsValidationError{}
+
+// Validate checks the field values on Http2ProtocolOptions_SettingsParameter
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, the first error encountered is returned, or nil if
+// there are no violations.
+func (m *Http2ProtocolOptions_SettingsParameter) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on
+// Http2ProtocolOptions_SettingsParameter with the rules defined in the proto
+// definition for this message. If any rules are violated, the result is a
+// list of violation errors wrapped in
+// Http2ProtocolOptions_SettingsParameterMultiError, or nil if none found.
+func (m *Http2ProtocolOptions_SettingsParameter) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Http2ProtocolOptions_SettingsParameter) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if wrapper := m.GetIdentifier(); wrapper != nil {
+
+ if val := wrapper.GetValue(); val < 0 || val > 65535 {
+ err := Http2ProtocolOptions_SettingsParameterValidationError{
+ field: "Identifier",
+ reason: "value must be inside range [0, 65535]",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ } else {
+ err := Http2ProtocolOptions_SettingsParameterValidationError{
+ field: "Identifier",
+ reason: "value is required and must not be nil.",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if m.GetValue() == nil {
+ err := Http2ProtocolOptions_SettingsParameterValidationError{
+ field: "Value",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetValue()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Http2ProtocolOptions_SettingsParameterValidationError{
+ field: "Value",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Http2ProtocolOptions_SettingsParameterValidationError{
+ field: "Value",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetValue()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Http2ProtocolOptions_SettingsParameterValidationError{
+ field: "Value",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return Http2ProtocolOptions_SettingsParameterMultiError(errors)
+ }
+
+ return nil
+}
+
+// Http2ProtocolOptions_SettingsParameterMultiError is an error wrapping
+// multiple validation errors returned by
+// Http2ProtocolOptions_SettingsParameter.ValidateAll() if the designated
+// constraints aren't met.
+type Http2ProtocolOptions_SettingsParameterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Http2ProtocolOptions_SettingsParameterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Http2ProtocolOptions_SettingsParameterMultiError) AllErrors() []error { return m }
+
+// Http2ProtocolOptions_SettingsParameterValidationError is the validation
+// error returned by Http2ProtocolOptions_SettingsParameter.Validate if the
+// designated constraints aren't met.
+type Http2ProtocolOptions_SettingsParameterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Http2ProtocolOptions_SettingsParameterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Http2ProtocolOptions_SettingsParameterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Http2ProtocolOptions_SettingsParameterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Http2ProtocolOptions_SettingsParameterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Http2ProtocolOptions_SettingsParameterValidationError) ErrorName() string {
+ return "Http2ProtocolOptions_SettingsParameterValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Http2ProtocolOptions_SettingsParameterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHttp2ProtocolOptions_SettingsParameter.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Http2ProtocolOptions_SettingsParameterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Http2ProtocolOptions_SettingsParameterValidationError{}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/proxy_protocol.pb.go b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/proxy_protocol.pb.go
new file mode 100644
index 000000000..ba77ffc6d
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/proxy_protocol.pb.go
@@ -0,0 +1,375 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.23.0
+// protoc v4.23.1
+// source: envoy/config/core/v3/proxy_protocol.proto
+
+package corev3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+type ProxyProtocolPassThroughTLVs_PassTLVsMatchType int32
+
+const (
+ // Pass all TLVs.
+ ProxyProtocolPassThroughTLVs_INCLUDE_ALL ProxyProtocolPassThroughTLVs_PassTLVsMatchType = 0
+ // Pass specific TLVs defined in tlv_type.
+ ProxyProtocolPassThroughTLVs_INCLUDE ProxyProtocolPassThroughTLVs_PassTLVsMatchType = 1
+)
+
+// Enum value maps for ProxyProtocolPassThroughTLVs_PassTLVsMatchType.
+var (
+ ProxyProtocolPassThroughTLVs_PassTLVsMatchType_name = map[int32]string{
+ 0: "INCLUDE_ALL",
+ 1: "INCLUDE",
+ }
+ ProxyProtocolPassThroughTLVs_PassTLVsMatchType_value = map[string]int32{
+ "INCLUDE_ALL": 0,
+ "INCLUDE": 1,
+ }
+)
+
+func (x ProxyProtocolPassThroughTLVs_PassTLVsMatchType) Enum() *ProxyProtocolPassThroughTLVs_PassTLVsMatchType {
+ p := new(ProxyProtocolPassThroughTLVs_PassTLVsMatchType)
+ *p = x
+ return p
+}
+
+func (x ProxyProtocolPassThroughTLVs_PassTLVsMatchType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ProxyProtocolPassThroughTLVs_PassTLVsMatchType) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_core_v3_proxy_protocol_proto_enumTypes[0].Descriptor()
+}
+
+func (ProxyProtocolPassThroughTLVs_PassTLVsMatchType) Type() protoreflect.EnumType {
+ return &file_envoy_config_core_v3_proxy_protocol_proto_enumTypes[0]
+}
+
+func (x ProxyProtocolPassThroughTLVs_PassTLVsMatchType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ProxyProtocolPassThroughTLVs_PassTLVsMatchType.Descriptor instead.
+func (ProxyProtocolPassThroughTLVs_PassTLVsMatchType) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_proxy_protocol_proto_rawDescGZIP(), []int{0, 0}
+}
+
+type ProxyProtocolConfig_Version int32
+
+const (
+ // PROXY protocol version 1. Human readable format.
+ ProxyProtocolConfig_V1 ProxyProtocolConfig_Version = 0
+ // PROXY protocol version 2. Binary format.
+ ProxyProtocolConfig_V2 ProxyProtocolConfig_Version = 1
+)
+
+// Enum value maps for ProxyProtocolConfig_Version.
+var (
+ ProxyProtocolConfig_Version_name = map[int32]string{
+ 0: "V1",
+ 1: "V2",
+ }
+ ProxyProtocolConfig_Version_value = map[string]int32{
+ "V1": 0,
+ "V2": 1,
+ }
+)
+
+func (x ProxyProtocolConfig_Version) Enum() *ProxyProtocolConfig_Version {
+ p := new(ProxyProtocolConfig_Version)
+ *p = x
+ return p
+}
+
+func (x ProxyProtocolConfig_Version) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ProxyProtocolConfig_Version) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_core_v3_proxy_protocol_proto_enumTypes[1].Descriptor()
+}
+
+func (ProxyProtocolConfig_Version) Type() protoreflect.EnumType {
+ return &file_envoy_config_core_v3_proxy_protocol_proto_enumTypes[1]
+}
+
+func (x ProxyProtocolConfig_Version) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ProxyProtocolConfig_Version.Descriptor instead.
+func (ProxyProtocolConfig_Version) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_proxy_protocol_proto_rawDescGZIP(), []int{1, 0}
+}
+
+type ProxyProtocolPassThroughTLVs struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The strategy to pass through TLVs. Default is INCLUDE_ALL.
+ // If INCLUDE_ALL is set, all TLVs will be passed through no matter the tlv_type field.
+ MatchType ProxyProtocolPassThroughTLVs_PassTLVsMatchType `protobuf:"varint,1,opt,name=match_type,json=matchType,proto3,enum=envoy.config.core.v3.ProxyProtocolPassThroughTLVs_PassTLVsMatchType" json:"match_type,omitempty"`
+ // The TLV types that are applied based on match_type.
+ // TLV type is defined as uint8_t in proxy protocol. See `the spec
+ // `_ for details.
+ TlvType []uint32 `protobuf:"varint,2,rep,packed,name=tlv_type,json=tlvType,proto3" json:"tlv_type,omitempty"`
+}
+
+func (x *ProxyProtocolPassThroughTLVs) Reset() {
+ *x = ProxyProtocolPassThroughTLVs{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_proxy_protocol_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ProxyProtocolPassThroughTLVs) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ProxyProtocolPassThroughTLVs) ProtoMessage() {}
+
+func (x *ProxyProtocolPassThroughTLVs) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_proxy_protocol_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ProxyProtocolPassThroughTLVs.ProtoReflect.Descriptor instead.
+func (*ProxyProtocolPassThroughTLVs) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_proxy_protocol_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ProxyProtocolPassThroughTLVs) GetMatchType() ProxyProtocolPassThroughTLVs_PassTLVsMatchType {
+ if x != nil {
+ return x.MatchType
+ }
+ return ProxyProtocolPassThroughTLVs_INCLUDE_ALL
+}
+
+func (x *ProxyProtocolPassThroughTLVs) GetTlvType() []uint32 {
+ if x != nil {
+ return x.TlvType
+ }
+ return nil
+}
+
+type ProxyProtocolConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The PROXY protocol version to use. See https://www.haproxy.org/download/2.1/doc/proxy-protocol.txt for details
+ Version ProxyProtocolConfig_Version `protobuf:"varint,1,opt,name=version,proto3,enum=envoy.config.core.v3.ProxyProtocolConfig_Version" json:"version,omitempty"`
+ // This config controls which TLVs can be passed to upstream if it is Proxy Protocol
+ // V2 header. If there is no setting for this field, no TLVs will be passed through.
+ PassThroughTlvs *ProxyProtocolPassThroughTLVs `protobuf:"bytes,2,opt,name=pass_through_tlvs,json=passThroughTlvs,proto3" json:"pass_through_tlvs,omitempty"`
+}
+
+func (x *ProxyProtocolConfig) Reset() {
+ *x = ProxyProtocolConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_proxy_protocol_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ProxyProtocolConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ProxyProtocolConfig) ProtoMessage() {}
+
+func (x *ProxyProtocolConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_proxy_protocol_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ProxyProtocolConfig.ProtoReflect.Descriptor instead.
+func (*ProxyProtocolConfig) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_proxy_protocol_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ProxyProtocolConfig) GetVersion() ProxyProtocolConfig_Version {
+ if x != nil {
+ return x.Version
+ }
+ return ProxyProtocolConfig_V1
+}
+
+func (x *ProxyProtocolConfig) GetPassThroughTlvs() *ProxyProtocolPassThroughTLVs {
+ if x != nil {
+ return x.PassThroughTlvs
+ }
+ return nil
+}
+
+var File_envoy_config_core_v3_proxy_protocol_proto protoreflect.FileDescriptor
+
+var file_envoy_config_core_v3_proxy_protocol_proto_rawDesc = []byte{
+ 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76,
+ 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64,
+ 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe0, 0x01, 0x0a, 0x1c, 0x50, 0x72,
+ 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x50, 0x61, 0x73, 0x73, 0x54,
+ 0x68, 0x72, 0x6f, 0x75, 0x67, 0x68, 0x54, 0x4c, 0x56, 0x73, 0x12, 0x63, 0x0a, 0x0a, 0x6d, 0x61,
+ 0x74, 0x63, 0x68, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x44,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f,
+ 0x63, 0x6f, 0x6c, 0x50, 0x61, 0x73, 0x73, 0x54, 0x68, 0x72, 0x6f, 0x75, 0x67, 0x68, 0x54, 0x4c,
+ 0x56, 0x73, 0x2e, 0x50, 0x61, 0x73, 0x73, 0x54, 0x4c, 0x56, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68,
+ 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x54, 0x79, 0x70, 0x65, 0x12,
+ 0x28, 0x0a, 0x08, 0x74, 0x6c, 0x76, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28,
+ 0x0d, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x92, 0x01, 0x07, 0x22, 0x05, 0x2a, 0x03, 0x10, 0x80, 0x02,
+ 0x52, 0x07, 0x74, 0x6c, 0x76, 0x54, 0x79, 0x70, 0x65, 0x22, 0x31, 0x0a, 0x11, 0x50, 0x61, 0x73,
+ 0x73, 0x54, 0x4c, 0x56, 0x73, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f,
+ 0x0a, 0x0b, 0x49, 0x4e, 0x43, 0x4c, 0x55, 0x44, 0x45, 0x5f, 0x41, 0x4c, 0x4c, 0x10, 0x00, 0x12,
+ 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x43, 0x4c, 0x55, 0x44, 0x45, 0x10, 0x01, 0x22, 0xdd, 0x01, 0x0a,
+ 0x13, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4b, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, 0x6f,
+ 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x12, 0x5e, 0x0a, 0x11, 0x70, 0x61, 0x73, 0x73, 0x5f, 0x74, 0x68, 0x72, 0x6f, 0x75, 0x67,
+ 0x68, 0x5f, 0x74, 0x6c, 0x76, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65,
+ 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f,
+ 0x6c, 0x50, 0x61, 0x73, 0x73, 0x54, 0x68, 0x72, 0x6f, 0x75, 0x67, 0x68, 0x54, 0x4c, 0x56, 0x73,
+ 0x52, 0x0f, 0x70, 0x61, 0x73, 0x73, 0x54, 0x68, 0x72, 0x6f, 0x75, 0x67, 0x68, 0x54, 0x6c, 0x76,
+ 0x73, 0x22, 0x19, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x06, 0x0a, 0x02,
+ 0x56, 0x31, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x56, 0x32, 0x10, 0x01, 0x42, 0x86, 0x01, 0x0a,
+ 0x22, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65,
+ 0x2e, 0x76, 0x33, 0x42, 0x12, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+ 0x6f, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75,
+ 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79,
+ 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e,
+ 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0xba, 0x80, 0xc8,
+ 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_config_core_v3_proxy_protocol_proto_rawDescOnce sync.Once
+ file_envoy_config_core_v3_proxy_protocol_proto_rawDescData = file_envoy_config_core_v3_proxy_protocol_proto_rawDesc
+)
+
+func file_envoy_config_core_v3_proxy_protocol_proto_rawDescGZIP() []byte {
+ file_envoy_config_core_v3_proxy_protocol_proto_rawDescOnce.Do(func() {
+ file_envoy_config_core_v3_proxy_protocol_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_proxy_protocol_proto_rawDescData)
+ })
+ return file_envoy_config_core_v3_proxy_protocol_proto_rawDescData
+}
+
+var file_envoy_config_core_v3_proxy_protocol_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_envoy_config_core_v3_proxy_protocol_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_envoy_config_core_v3_proxy_protocol_proto_goTypes = []interface{}{
+ (ProxyProtocolPassThroughTLVs_PassTLVsMatchType)(0), // 0: envoy.config.core.v3.ProxyProtocolPassThroughTLVs.PassTLVsMatchType
+ (ProxyProtocolConfig_Version)(0), // 1: envoy.config.core.v3.ProxyProtocolConfig.Version
+ (*ProxyProtocolPassThroughTLVs)(nil), // 2: envoy.config.core.v3.ProxyProtocolPassThroughTLVs
+ (*ProxyProtocolConfig)(nil), // 3: envoy.config.core.v3.ProxyProtocolConfig
+}
+var file_envoy_config_core_v3_proxy_protocol_proto_depIdxs = []int32{
+ 0, // 0: envoy.config.core.v3.ProxyProtocolPassThroughTLVs.match_type:type_name -> envoy.config.core.v3.ProxyProtocolPassThroughTLVs.PassTLVsMatchType
+ 1, // 1: envoy.config.core.v3.ProxyProtocolConfig.version:type_name -> envoy.config.core.v3.ProxyProtocolConfig.Version
+ 2, // 2: envoy.config.core.v3.ProxyProtocolConfig.pass_through_tlvs:type_name -> envoy.config.core.v3.ProxyProtocolPassThroughTLVs
+ 3, // [3:3] is the sub-list for method output_type
+ 3, // [3:3] is the sub-list for method input_type
+ 3, // [3:3] is the sub-list for extension type_name
+ 3, // [3:3] is the sub-list for extension extendee
+ 0, // [0:3] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_core_v3_proxy_protocol_proto_init() }
+func file_envoy_config_core_v3_proxy_protocol_proto_init() {
+ if File_envoy_config_core_v3_proxy_protocol_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_core_v3_proxy_protocol_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ProxyProtocolPassThroughTLVs); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_proxy_protocol_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ProxyProtocolConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_core_v3_proxy_protocol_proto_rawDesc,
+ NumEnums: 2,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_core_v3_proxy_protocol_proto_goTypes,
+ DependencyIndexes: file_envoy_config_core_v3_proxy_protocol_proto_depIdxs,
+ EnumInfos: file_envoy_config_core_v3_proxy_protocol_proto_enumTypes,
+ MessageInfos: file_envoy_config_core_v3_proxy_protocol_proto_msgTypes,
+ }.Build()
+ File_envoy_config_core_v3_proxy_protocol_proto = out.File
+ file_envoy_config_core_v3_proxy_protocol_proto_rawDesc = nil
+ file_envoy_config_core_v3_proxy_protocol_proto_goTypes = nil
+ file_envoy_config_core_v3_proxy_protocol_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/proxy_protocol.pb.validate.go b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/proxy_protocol.pb.validate.go
new file mode 100644
index 000000000..2edd9b116
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/proxy_protocol.pb.validate.go
@@ -0,0 +1,290 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/core/v3/proxy_protocol.proto
+
+package corev3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on ProxyProtocolPassThroughTLVs with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *ProxyProtocolPassThroughTLVs) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ProxyProtocolPassThroughTLVs with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// ProxyProtocolPassThroughTLVsMultiError, or nil if none found.
+func (m *ProxyProtocolPassThroughTLVs) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ProxyProtocolPassThroughTLVs) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for MatchType
+
+ for idx, item := range m.GetTlvType() {
+ _, _ = idx, item
+
+ if item >= 256 {
+ err := ProxyProtocolPassThroughTLVsValidationError{
+ field: fmt.Sprintf("TlvType[%v]", idx),
+ reason: "value must be less than 256",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return ProxyProtocolPassThroughTLVsMultiError(errors)
+ }
+
+ return nil
+}
+
+// ProxyProtocolPassThroughTLVsMultiError is an error wrapping multiple
+// validation errors returned by ProxyProtocolPassThroughTLVs.ValidateAll() if
+// the designated constraints aren't met.
+type ProxyProtocolPassThroughTLVsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ProxyProtocolPassThroughTLVsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ProxyProtocolPassThroughTLVsMultiError) AllErrors() []error { return m }
+
+// ProxyProtocolPassThroughTLVsValidationError is the validation error returned
+// by ProxyProtocolPassThroughTLVs.Validate if the designated constraints
+// aren't met.
+type ProxyProtocolPassThroughTLVsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ProxyProtocolPassThroughTLVsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ProxyProtocolPassThroughTLVsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ProxyProtocolPassThroughTLVsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ProxyProtocolPassThroughTLVsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ProxyProtocolPassThroughTLVsValidationError) ErrorName() string {
+ return "ProxyProtocolPassThroughTLVsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ProxyProtocolPassThroughTLVsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sProxyProtocolPassThroughTLVs.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ProxyProtocolPassThroughTLVsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ProxyProtocolPassThroughTLVsValidationError{}
+
+// Validate checks the field values on ProxyProtocolConfig with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *ProxyProtocolConfig) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ProxyProtocolConfig with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// ProxyProtocolConfigMultiError, or nil if none found.
+func (m *ProxyProtocolConfig) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ProxyProtocolConfig) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Version
+
+ if all {
+ switch v := interface{}(m.GetPassThroughTlvs()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ProxyProtocolConfigValidationError{
+ field: "PassThroughTlvs",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ProxyProtocolConfigValidationError{
+ field: "PassThroughTlvs",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetPassThroughTlvs()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ProxyProtocolConfigValidationError{
+ field: "PassThroughTlvs",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return ProxyProtocolConfigMultiError(errors)
+ }
+
+ return nil
+}
+
+// ProxyProtocolConfigMultiError is an error wrapping multiple validation
+// errors returned by ProxyProtocolConfig.ValidateAll() if the designated
+// constraints aren't met.
+type ProxyProtocolConfigMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ProxyProtocolConfigMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ProxyProtocolConfigMultiError) AllErrors() []error { return m }
+
+// ProxyProtocolConfigValidationError is the validation error returned by
+// ProxyProtocolConfig.Validate if the designated constraints aren't met.
+type ProxyProtocolConfigValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ProxyProtocolConfigValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ProxyProtocolConfigValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ProxyProtocolConfigValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ProxyProtocolConfigValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ProxyProtocolConfigValidationError) ErrorName() string {
+ return "ProxyProtocolConfigValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ProxyProtocolConfigValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sProxyProtocolConfig.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ProxyProtocolConfigValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ProxyProtocolConfigValidationError{}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/resolver.pb.go b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/resolver.pb.go
new file mode 100644
index 000000000..6f2e61768
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/resolver.pb.go
@@ -0,0 +1,270 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.23.0
+// protoc v4.23.1
+// source: envoy/config/core/v3/resolver.proto
+
+package corev3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// Configuration of DNS resolver option flags which control the behavior of the DNS resolver.
+type DnsResolverOptions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Use TCP for all DNS queries instead of the default protocol UDP.
+ UseTcpForDnsLookups bool `protobuf:"varint,1,opt,name=use_tcp_for_dns_lookups,json=useTcpForDnsLookups,proto3" json:"use_tcp_for_dns_lookups,omitempty"`
+ // Do not use the default search domains; only query hostnames as-is or as aliases.
+ NoDefaultSearchDomain bool `protobuf:"varint,2,opt,name=no_default_search_domain,json=noDefaultSearchDomain,proto3" json:"no_default_search_domain,omitempty"`
+}
+
+func (x *DnsResolverOptions) Reset() {
+ *x = DnsResolverOptions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_resolver_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DnsResolverOptions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DnsResolverOptions) ProtoMessage() {}
+
+func (x *DnsResolverOptions) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_resolver_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DnsResolverOptions.ProtoReflect.Descriptor instead.
+func (*DnsResolverOptions) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_resolver_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *DnsResolverOptions) GetUseTcpForDnsLookups() bool {
+ if x != nil {
+ return x.UseTcpForDnsLookups
+ }
+ return false
+}
+
+func (x *DnsResolverOptions) GetNoDefaultSearchDomain() bool {
+ if x != nil {
+ return x.NoDefaultSearchDomain
+ }
+ return false
+}
+
+// DNS resolution configuration which includes the underlying dns resolver addresses and options.
+type DnsResolutionConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A list of dns resolver addresses. If specified, the DNS client library will perform resolution
+ // via the underlying DNS resolvers. Otherwise, the default system resolvers
+ // (e.g., /etc/resolv.conf) will be used.
+ Resolvers []*Address `protobuf:"bytes,1,rep,name=resolvers,proto3" json:"resolvers,omitempty"`
+ // Configuration of DNS resolver option flags which control the behavior of the DNS resolver.
+ DnsResolverOptions *DnsResolverOptions `protobuf:"bytes,2,opt,name=dns_resolver_options,json=dnsResolverOptions,proto3" json:"dns_resolver_options,omitempty"`
+}
+
+func (x *DnsResolutionConfig) Reset() {
+ *x = DnsResolutionConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_resolver_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DnsResolutionConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DnsResolutionConfig) ProtoMessage() {}
+
+func (x *DnsResolutionConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_resolver_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DnsResolutionConfig.ProtoReflect.Descriptor instead.
+func (*DnsResolutionConfig) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_resolver_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *DnsResolutionConfig) GetResolvers() []*Address {
+ if x != nil {
+ return x.Resolvers
+ }
+ return nil
+}
+
+func (x *DnsResolutionConfig) GetDnsResolverOptions() *DnsResolverOptions {
+ if x != nil {
+ return x.DnsResolverOptions
+ }
+ return nil
+}
+
+var File_envoy_config_core_v3_resolver_proto protoreflect.FileDescriptor
+
+var file_envoy_config_core_v3_resolver_proto_rawDesc = []byte{
+ 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x22, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76,
+ 0x33, 0x2f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17,
+ 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x83, 0x01, 0x0a, 0x12, 0x44, 0x6e, 0x73, 0x52,
+ 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x34,
+ 0x0a, 0x17, 0x75, 0x73, 0x65, 0x5f, 0x74, 0x63, 0x70, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x64, 0x6e,
+ 0x73, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x13, 0x75, 0x73, 0x65, 0x54, 0x63, 0x70, 0x46, 0x6f, 0x72, 0x44, 0x6e, 0x73, 0x4c, 0x6f, 0x6f,
+ 0x6b, 0x75, 0x70, 0x73, 0x12, 0x37, 0x0a, 0x18, 0x6e, 0x6f, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75,
+ 0x6c, 0x74, 0x5f, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x6e, 0x6f, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c,
+ 0x74, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, 0xb8, 0x01,
+ 0x0a, 0x13, 0x44, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x45, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65,
+ 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e,
+ 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08,
+ 0x01, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x73, 0x12, 0x5a, 0x0a, 0x14,
+ 0x64, 0x6e, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x5f, 0x6f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76,
+ 0x33, 0x2e, 0x44, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x4f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x12, 0x64, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65,
+ 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x81, 0x01, 0x0a, 0x22, 0x69, 0x6f, 0x2e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42,
+ 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
+ 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f,
+ 0x72, 0x65, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_config_core_v3_resolver_proto_rawDescOnce sync.Once
+ file_envoy_config_core_v3_resolver_proto_rawDescData = file_envoy_config_core_v3_resolver_proto_rawDesc
+)
+
+func file_envoy_config_core_v3_resolver_proto_rawDescGZIP() []byte {
+ file_envoy_config_core_v3_resolver_proto_rawDescOnce.Do(func() {
+ file_envoy_config_core_v3_resolver_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_resolver_proto_rawDescData)
+ })
+ return file_envoy_config_core_v3_resolver_proto_rawDescData
+}
+
+var file_envoy_config_core_v3_resolver_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_envoy_config_core_v3_resolver_proto_goTypes = []interface{}{
+ (*DnsResolverOptions)(nil), // 0: envoy.config.core.v3.DnsResolverOptions
+ (*DnsResolutionConfig)(nil), // 1: envoy.config.core.v3.DnsResolutionConfig
+ (*Address)(nil), // 2: envoy.config.core.v3.Address
+}
+var file_envoy_config_core_v3_resolver_proto_depIdxs = []int32{
+ 2, // 0: envoy.config.core.v3.DnsResolutionConfig.resolvers:type_name -> envoy.config.core.v3.Address
+ 0, // 1: envoy.config.core.v3.DnsResolutionConfig.dns_resolver_options:type_name -> envoy.config.core.v3.DnsResolverOptions
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_core_v3_resolver_proto_init() }
+func file_envoy_config_core_v3_resolver_proto_init() {
+ if File_envoy_config_core_v3_resolver_proto != nil {
+ return
+ }
+ file_envoy_config_core_v3_address_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_core_v3_resolver_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DnsResolverOptions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_resolver_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DnsResolutionConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_core_v3_resolver_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_core_v3_resolver_proto_goTypes,
+ DependencyIndexes: file_envoy_config_core_v3_resolver_proto_depIdxs,
+ MessageInfos: file_envoy_config_core_v3_resolver_proto_msgTypes,
+ }.Build()
+ File_envoy_config_core_v3_resolver_proto = out.File
+ file_envoy_config_core_v3_resolver_proto_rawDesc = nil
+ file_envoy_config_core_v3_resolver_proto_goTypes = nil
+ file_envoy_config_core_v3_resolver_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/resolver.pb.validate.go b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/resolver.pb.validate.go
new file mode 100644
index 000000000..30de106fe
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/resolver.pb.validate.go
@@ -0,0 +1,318 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/core/v3/resolver.proto
+
+package corev3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on DnsResolverOptions with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *DnsResolverOptions) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on DnsResolverOptions with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// DnsResolverOptionsMultiError, or nil if none found.
+func (m *DnsResolverOptions) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *DnsResolverOptions) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for UseTcpForDnsLookups
+
+ // no validation rules for NoDefaultSearchDomain
+
+ if len(errors) > 0 {
+ return DnsResolverOptionsMultiError(errors)
+ }
+
+ return nil
+}
+
+// DnsResolverOptionsMultiError is an error wrapping multiple validation errors
+// returned by DnsResolverOptions.ValidateAll() if the designated constraints
+// aren't met.
+type DnsResolverOptionsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m DnsResolverOptionsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m DnsResolverOptionsMultiError) AllErrors() []error { return m }
+
+// DnsResolverOptionsValidationError is the validation error returned by
+// DnsResolverOptions.Validate if the designated constraints aren't met.
+type DnsResolverOptionsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e DnsResolverOptionsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e DnsResolverOptionsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e DnsResolverOptionsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e DnsResolverOptionsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e DnsResolverOptionsValidationError) ErrorName() string {
+ return "DnsResolverOptionsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e DnsResolverOptionsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sDnsResolverOptions.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = DnsResolverOptionsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = DnsResolverOptionsValidationError{}
+
+// Validate checks the field values on DnsResolutionConfig with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *DnsResolutionConfig) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on DnsResolutionConfig with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// DnsResolutionConfigMultiError, or nil if none found.
+func (m *DnsResolutionConfig) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *DnsResolutionConfig) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(m.GetResolvers()) < 1 {
+ err := DnsResolutionConfigValidationError{
+ field: "Resolvers",
+ reason: "value must contain at least 1 item(s)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ for idx, item := range m.GetResolvers() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, DnsResolutionConfigValidationError{
+ field: fmt.Sprintf("Resolvers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, DnsResolutionConfigValidationError{
+ field: fmt.Sprintf("Resolvers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return DnsResolutionConfigValidationError{
+ field: fmt.Sprintf("Resolvers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if all {
+ switch v := interface{}(m.GetDnsResolverOptions()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, DnsResolutionConfigValidationError{
+ field: "DnsResolverOptions",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, DnsResolutionConfigValidationError{
+ field: "DnsResolverOptions",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetDnsResolverOptions()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return DnsResolutionConfigValidationError{
+ field: "DnsResolverOptions",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return DnsResolutionConfigMultiError(errors)
+ }
+
+ return nil
+}
+
+// DnsResolutionConfigMultiError is an error wrapping multiple validation
+// errors returned by DnsResolutionConfig.ValidateAll() if the designated
+// constraints aren't met.
+type DnsResolutionConfigMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m DnsResolutionConfigMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m DnsResolutionConfigMultiError) AllErrors() []error { return m }
+
+// DnsResolutionConfigValidationError is the validation error returned by
+// DnsResolutionConfig.Validate if the designated constraints aren't met.
+type DnsResolutionConfigValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e DnsResolutionConfigValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e DnsResolutionConfigValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e DnsResolutionConfigValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e DnsResolutionConfigValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e DnsResolutionConfigValidationError) ErrorName() string {
+ return "DnsResolutionConfigValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e DnsResolutionConfigValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sDnsResolutionConfig.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = DnsResolutionConfigValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = DnsResolutionConfigValidationError{}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/socket_option.pb.go b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/socket_option.pb.go
new file mode 100644
index 000000000..6847f51ed
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/socket_option.pb.go
@@ -0,0 +1,409 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.23.0
+// protoc v4.23.1
+// source: envoy/config/core/v3/socket_option.proto
+
+package corev3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+type SocketOption_SocketState int32
+
+const (
+ // Socket options are applied after socket creation but before binding the socket to a port
+ SocketOption_STATE_PREBIND SocketOption_SocketState = 0
+ // Socket options are applied after binding the socket to a port but before calling listen()
+ SocketOption_STATE_BOUND SocketOption_SocketState = 1
+ // Socket options are applied after calling listen()
+ SocketOption_STATE_LISTENING SocketOption_SocketState = 2
+)
+
+// Enum value maps for SocketOption_SocketState.
+var (
+ SocketOption_SocketState_name = map[int32]string{
+ 0: "STATE_PREBIND",
+ 1: "STATE_BOUND",
+ 2: "STATE_LISTENING",
+ }
+ SocketOption_SocketState_value = map[string]int32{
+ "STATE_PREBIND": 0,
+ "STATE_BOUND": 1,
+ "STATE_LISTENING": 2,
+ }
+)
+
+func (x SocketOption_SocketState) Enum() *SocketOption_SocketState {
+ p := new(SocketOption_SocketState)
+ *p = x
+ return p
+}
+
+func (x SocketOption_SocketState) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SocketOption_SocketState) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_core_v3_socket_option_proto_enumTypes[0].Descriptor()
+}
+
+func (SocketOption_SocketState) Type() protoreflect.EnumType {
+ return &file_envoy_config_core_v3_socket_option_proto_enumTypes[0]
+}
+
+func (x SocketOption_SocketState) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use SocketOption_SocketState.Descriptor instead.
+func (SocketOption_SocketState) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_socket_option_proto_rawDescGZIP(), []int{0, 0}
+}
+
+// Generic socket option message. This would be used to set socket options that
+// might not exist in upstream kernels or precompiled Envoy binaries.
+//
+// For example:
+//
+// .. code-block:: json
+//
+// {
+// "description": "support tcp keep alive",
+// "state": 0,
+// "level": 1,
+// "name": 9,
+// "int_value": 1,
+// }
+//
+// 1 means SOL_SOCKET and 9 means SO_KEEPALIVE on Linux.
+// With the above configuration, `TCP Keep-Alives `_
+// can be enabled in socket with Linux, which can be used in
+// :ref:`listener's` or
+// :ref:`admin's ` socket_options etc.
+//
+// It should be noted that the name or level may have different values on different platforms.
+// [#next-free-field: 7]
+type SocketOption struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // An optional name to give this socket option for debugging, etc.
+ // Uniqueness is not required and no special meaning is assumed.
+ Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+ // Corresponding to the level value passed to setsockopt, such as IPPROTO_TCP
+ Level int64 `protobuf:"varint,2,opt,name=level,proto3" json:"level,omitempty"`
+ // The numeric name as passed to setsockopt
+ Name int64 `protobuf:"varint,3,opt,name=name,proto3" json:"name,omitempty"`
+ // Types that are assignable to Value:
+ //
+ // *SocketOption_IntValue
+ // *SocketOption_BufValue
+ Value isSocketOption_Value `protobuf_oneof:"value"`
+ // The state in which the option will be applied. When used in BindConfig
+ // STATE_PREBIND is currently the only valid value.
+ State SocketOption_SocketState `protobuf:"varint,6,opt,name=state,proto3,enum=envoy.config.core.v3.SocketOption_SocketState" json:"state,omitempty"`
+}
+
+func (x *SocketOption) Reset() {
+ *x = SocketOption{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_socket_option_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SocketOption) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SocketOption) ProtoMessage() {}
+
+func (x *SocketOption) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_socket_option_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SocketOption.ProtoReflect.Descriptor instead.
+func (*SocketOption) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_socket_option_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *SocketOption) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+func (x *SocketOption) GetLevel() int64 {
+ if x != nil {
+ return x.Level
+ }
+ return 0
+}
+
+func (x *SocketOption) GetName() int64 {
+ if x != nil {
+ return x.Name
+ }
+ return 0
+}
+
+func (m *SocketOption) GetValue() isSocketOption_Value {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (x *SocketOption) GetIntValue() int64 {
+ if x, ok := x.GetValue().(*SocketOption_IntValue); ok {
+ return x.IntValue
+ }
+ return 0
+}
+
+func (x *SocketOption) GetBufValue() []byte {
+ if x, ok := x.GetValue().(*SocketOption_BufValue); ok {
+ return x.BufValue
+ }
+ return nil
+}
+
+func (x *SocketOption) GetState() SocketOption_SocketState {
+ if x != nil {
+ return x.State
+ }
+ return SocketOption_STATE_PREBIND
+}
+
+type isSocketOption_Value interface {
+ isSocketOption_Value()
+}
+
+type SocketOption_IntValue struct {
+ // Because many sockopts take an int value.
+ IntValue int64 `protobuf:"varint,4,opt,name=int_value,json=intValue,proto3,oneof"`
+}
+
+type SocketOption_BufValue struct {
+ // Otherwise it's a byte buffer.
+ BufValue []byte `protobuf:"bytes,5,opt,name=buf_value,json=bufValue,proto3,oneof"`
+}
+
+func (*SocketOption_IntValue) isSocketOption_Value() {}
+
+func (*SocketOption_BufValue) isSocketOption_Value() {}
+
+type SocketOptionsOverride struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ SocketOptions []*SocketOption `protobuf:"bytes,1,rep,name=socket_options,json=socketOptions,proto3" json:"socket_options,omitempty"`
+}
+
+func (x *SocketOptionsOverride) Reset() {
+ *x = SocketOptionsOverride{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_socket_option_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SocketOptionsOverride) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SocketOptionsOverride) ProtoMessage() {}
+
+func (x *SocketOptionsOverride) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_socket_option_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SocketOptionsOverride.ProtoReflect.Descriptor instead.
+func (*SocketOptionsOverride) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_socket_option_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *SocketOptionsOverride) GetSocketOptions() []*SocketOption {
+ if x != nil {
+ return x.SocketOptions
+ }
+ return nil
+}
+
+var File_envoy_config_core_v3_socket_option_proto protoreflect.FileDescriptor
+
+var file_envoy_config_core_v3_socket_option_proto_rawDesc = []byte{
+ 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33,
+ 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c,
+ 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe5, 0x02, 0x0a, 0x0c,
+ 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b,
+ 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14,
+ 0x0a, 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6c,
+ 0x65, 0x76, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x08, 0x69,
+ 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1d, 0x0a, 0x09, 0x62, 0x75, 0x66, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x08, 0x62, 0x75,
+ 0x66, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x4e, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18,
+ 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63,
+ 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74,
+ 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52,
+ 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x22, 0x46, 0x0a, 0x0b, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74,
+ 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x11, 0x0a, 0x0d, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x50,
+ 0x52, 0x45, 0x42, 0x49, 0x4e, 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x41, 0x54,
+ 0x45, 0x5f, 0x42, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41,
+ 0x54, 0x45, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x45, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x3a, 0x25,
+ 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69,
+ 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x03,
+ 0xf8, 0x42, 0x01, 0x22, 0x62, 0x0a, 0x15, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x49, 0x0a, 0x0e,
+ 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63, 0x6b,
+ 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74,
+ 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x85, 0x01, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x11,
+ 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33,
+ 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62,
+ 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_config_core_v3_socket_option_proto_rawDescOnce sync.Once
+ file_envoy_config_core_v3_socket_option_proto_rawDescData = file_envoy_config_core_v3_socket_option_proto_rawDesc
+)
+
+func file_envoy_config_core_v3_socket_option_proto_rawDescGZIP() []byte {
+ file_envoy_config_core_v3_socket_option_proto_rawDescOnce.Do(func() {
+ file_envoy_config_core_v3_socket_option_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_socket_option_proto_rawDescData)
+ })
+ return file_envoy_config_core_v3_socket_option_proto_rawDescData
+}
+
+var file_envoy_config_core_v3_socket_option_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_envoy_config_core_v3_socket_option_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_envoy_config_core_v3_socket_option_proto_goTypes = []interface{}{
+ (SocketOption_SocketState)(0), // 0: envoy.config.core.v3.SocketOption.SocketState
+ (*SocketOption)(nil), // 1: envoy.config.core.v3.SocketOption
+ (*SocketOptionsOverride)(nil), // 2: envoy.config.core.v3.SocketOptionsOverride
+}
+var file_envoy_config_core_v3_socket_option_proto_depIdxs = []int32{
+ 0, // 0: envoy.config.core.v3.SocketOption.state:type_name -> envoy.config.core.v3.SocketOption.SocketState
+ 1, // 1: envoy.config.core.v3.SocketOptionsOverride.socket_options:type_name -> envoy.config.core.v3.SocketOption
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_core_v3_socket_option_proto_init() }
+func file_envoy_config_core_v3_socket_option_proto_init() {
+ if File_envoy_config_core_v3_socket_option_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_core_v3_socket_option_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SocketOption); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_core_v3_socket_option_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SocketOptionsOverride); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_envoy_config_core_v3_socket_option_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*SocketOption_IntValue)(nil),
+ (*SocketOption_BufValue)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_core_v3_socket_option_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_core_v3_socket_option_proto_goTypes,
+ DependencyIndexes: file_envoy_config_core_v3_socket_option_proto_depIdxs,
+ EnumInfos: file_envoy_config_core_v3_socket_option_proto_enumTypes,
+ MessageInfos: file_envoy_config_core_v3_socket_option_proto_msgTypes,
+ }.Build()
+ File_envoy_config_core_v3_socket_option_proto = out.File
+ file_envoy_config_core_v3_socket_option_proto_rawDesc = nil
+ file_envoy_config_core_v3_socket_option_proto_goTypes = nil
+ file_envoy_config_core_v3_socket_option_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/socket_option.pb.validate.go b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/socket_option.pb.validate.go
new file mode 100644
index 000000000..dc0b53f55
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/socket_option.pb.validate.go
@@ -0,0 +1,330 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/core/v3/socket_option.proto
+
+package corev3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on SocketOption with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *SocketOption) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on SocketOption with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in SocketOptionMultiError, or
+// nil if none found.
+func (m *SocketOption) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *SocketOption) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Description
+
+ // no validation rules for Level
+
+ // no validation rules for Name
+
+ if _, ok := SocketOption_SocketState_name[int32(m.GetState())]; !ok {
+ err := SocketOptionValidationError{
+ field: "State",
+ reason: "value must be one of the defined enum values",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ oneofValuePresent := false
+ switch v := m.Value.(type) {
+ case *SocketOption_IntValue:
+ if v == nil {
+ err := SocketOptionValidationError{
+ field: "Value",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofValuePresent = true
+ // no validation rules for IntValue
+ case *SocketOption_BufValue:
+ if v == nil {
+ err := SocketOptionValidationError{
+ field: "Value",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofValuePresent = true
+ // no validation rules for BufValue
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofValuePresent {
+ err := SocketOptionValidationError{
+ field: "Value",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return SocketOptionMultiError(errors)
+ }
+
+ return nil
+}
+
+// SocketOptionMultiError is an error wrapping multiple validation errors
+// returned by SocketOption.ValidateAll() if the designated constraints aren't met.
+type SocketOptionMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m SocketOptionMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m SocketOptionMultiError) AllErrors() []error { return m }
+
+// SocketOptionValidationError is the validation error returned by
+// SocketOption.Validate if the designated constraints aren't met.
+type SocketOptionValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e SocketOptionValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e SocketOptionValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e SocketOptionValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e SocketOptionValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e SocketOptionValidationError) ErrorName() string { return "SocketOptionValidationError" }
+
+// Error satisfies the builtin error interface
+func (e SocketOptionValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sSocketOption.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = SocketOptionValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = SocketOptionValidationError{}
+
+// Validate checks the field values on SocketOptionsOverride with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *SocketOptionsOverride) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on SocketOptionsOverride with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// SocketOptionsOverrideMultiError, or nil if none found.
+func (m *SocketOptionsOverride) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *SocketOptionsOverride) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetSocketOptions() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, SocketOptionsOverrideValidationError{
+ field: fmt.Sprintf("SocketOptions[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, SocketOptionsOverrideValidationError{
+ field: fmt.Sprintf("SocketOptions[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return SocketOptionsOverrideValidationError{
+ field: fmt.Sprintf("SocketOptions[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return SocketOptionsOverrideMultiError(errors)
+ }
+
+ return nil
+}
+
+// SocketOptionsOverrideMultiError is an error wrapping multiple validation
+// errors returned by SocketOptionsOverride.ValidateAll() if the designated
+// constraints aren't met.
+type SocketOptionsOverrideMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m SocketOptionsOverrideMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m SocketOptionsOverrideMultiError) AllErrors() []error { return m }
+
+// SocketOptionsOverrideValidationError is the validation error returned by
+// SocketOptionsOverride.Validate if the designated constraints aren't met.
+type SocketOptionsOverrideValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e SocketOptionsOverrideValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e SocketOptionsOverrideValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e SocketOptionsOverrideValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e SocketOptionsOverrideValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e SocketOptionsOverrideValidationError) ErrorName() string {
+ return "SocketOptionsOverrideValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e SocketOptionsOverrideValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sSocketOptionsOverride.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = SocketOptionsOverrideValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = SocketOptionsOverrideValidationError{}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/substitution_format_string.pb.go b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/substitution_format_string.pb.go
new file mode 100644
index 000000000..9e29768d7
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/substitution_format_string.pb.go
@@ -0,0 +1,363 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.23.0
+// protoc v4.23.1
+// source: envoy/config/core/v3/substitution_format_string.proto
+
+package corev3
+
+import (
+ _ "github.com/cilium/proxy/go/envoy/annotations"
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// Configuration to use multiple :ref:`command operators `
+// to generate a new string in either plain text or JSON format.
+// [#next-free-field: 7]
+type SubstitutionFormatString struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Format:
+ //
+ // *SubstitutionFormatString_TextFormat
+ // *SubstitutionFormatString_JsonFormat
+ // *SubstitutionFormatString_TextFormatSource
+ Format isSubstitutionFormatString_Format `protobuf_oneof:"format"`
+ // If set to true, when command operators are evaluated to null,
+ //
+ // - for “text_format“, the output of the empty operator is changed from “-“ to an
+ // empty string, so that empty values are omitted entirely.
+ // - for “json_format“ the keys with null values are omitted in the output structure.
+ OmitEmptyValues bool `protobuf:"varint,3,opt,name=omit_empty_values,json=omitEmptyValues,proto3" json:"omit_empty_values,omitempty"`
+ // Specify a “content_type“ field.
+ // If this field is not set then “text/plain“ is used for “text_format“ and
+ // “application/json“ is used for “json_format“.
+ //
+ // .. validated-code-block:: yaml
+ //
+ // :type-name: envoy.config.core.v3.SubstitutionFormatString
+ //
+ // content_type: "text/html; charset=UTF-8"
+ ContentType string `protobuf:"bytes,4,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"`
+ // Specifies a collection of Formatter plugins that can be called from the access log configuration.
+ // See the formatters extensions documentation for details.
+ // [#extension-category: envoy.formatter]
+ Formatters []*TypedExtensionConfig `protobuf:"bytes,6,rep,name=formatters,proto3" json:"formatters,omitempty"`
+}
+
+func (x *SubstitutionFormatString) Reset() {
+ *x = SubstitutionFormatString{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_substitution_format_string_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SubstitutionFormatString) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SubstitutionFormatString) ProtoMessage() {}
+
+func (x *SubstitutionFormatString) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_substitution_format_string_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SubstitutionFormatString.ProtoReflect.Descriptor instead.
+func (*SubstitutionFormatString) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_substitution_format_string_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *SubstitutionFormatString) GetFormat() isSubstitutionFormatString_Format {
+ if m != nil {
+ return m.Format
+ }
+ return nil
+}
+
+// Deprecated: Do not use.
+func (x *SubstitutionFormatString) GetTextFormat() string {
+ if x, ok := x.GetFormat().(*SubstitutionFormatString_TextFormat); ok {
+ return x.TextFormat
+ }
+ return ""
+}
+
+func (x *SubstitutionFormatString) GetJsonFormat() *structpb.Struct {
+ if x, ok := x.GetFormat().(*SubstitutionFormatString_JsonFormat); ok {
+ return x.JsonFormat
+ }
+ return nil
+}
+
+func (x *SubstitutionFormatString) GetTextFormatSource() *DataSource {
+ if x, ok := x.GetFormat().(*SubstitutionFormatString_TextFormatSource); ok {
+ return x.TextFormatSource
+ }
+ return nil
+}
+
+func (x *SubstitutionFormatString) GetOmitEmptyValues() bool {
+ if x != nil {
+ return x.OmitEmptyValues
+ }
+ return false
+}
+
+func (x *SubstitutionFormatString) GetContentType() string {
+ if x != nil {
+ return x.ContentType
+ }
+ return ""
+}
+
+func (x *SubstitutionFormatString) GetFormatters() []*TypedExtensionConfig {
+ if x != nil {
+ return x.Formatters
+ }
+ return nil
+}
+
+type isSubstitutionFormatString_Format interface {
+ isSubstitutionFormatString_Format()
+}
+
+type SubstitutionFormatString_TextFormat struct {
+ // Specify a format with command operators to form a text string.
+ // Its details is described in :ref:`format string`.
+ //
+ // For example, setting “text_format“ like below,
+ //
+ // .. validated-code-block:: yaml
+ //
+ // :type-name: envoy.config.core.v3.SubstitutionFormatString
+ //
+ // text_format: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n"
+ //
+ // generates plain text similar to:
+ //
+ // .. code-block:: text
+ //
+ // upstream connect error:503:path=/foo
+ //
+ // Deprecated in favor of :ref:`text_format_source `. To migrate text format strings, use the :ref:`inline_string ` field.
+ //
+ // Deprecated: Do not use.
+ TextFormat string `protobuf:"bytes,1,opt,name=text_format,json=textFormat,proto3,oneof"`
+}
+
+type SubstitutionFormatString_JsonFormat struct {
+ // Specify a format with command operators to form a JSON string.
+ // Its details is described in :ref:`format dictionary`.
+ // Values are rendered as strings, numbers, or boolean values as appropriate.
+ // Nested JSON objects may be produced by some command operators (e.g. FILTER_STATE or DYNAMIC_METADATA).
+ // See the documentation for a specific command operator for details.
+ //
+ // .. validated-code-block:: yaml
+ //
+ // :type-name: envoy.config.core.v3.SubstitutionFormatString
+ //
+ // json_format:
+ // status: "%RESPONSE_CODE%"
+ // message: "%LOCAL_REPLY_BODY%"
+ //
+ // The following JSON object would be created:
+ //
+ // .. code-block:: json
+ //
+ // {
+ // "status": 500,
+ // "message": "My error message"
+ // }
+ JsonFormat *structpb.Struct `protobuf:"bytes,2,opt,name=json_format,json=jsonFormat,proto3,oneof"`
+}
+
+type SubstitutionFormatString_TextFormatSource struct {
+ // Specify a format with command operators to form a text string.
+ // Its details is described in :ref:`format string`.
+ //
+ // For example, setting “text_format“ like below,
+ //
+ // .. validated-code-block:: yaml
+ //
+ // :type-name: envoy.config.core.v3.SubstitutionFormatString
+ //
+ // text_format_source:
+ // inline_string: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n"
+ //
+ // generates plain text similar to:
+ //
+ // .. code-block:: text
+ //
+ // upstream connect error:503:path=/foo
+ TextFormatSource *DataSource `protobuf:"bytes,5,opt,name=text_format_source,json=textFormatSource,proto3,oneof"`
+}
+
+func (*SubstitutionFormatString_TextFormat) isSubstitutionFormatString_Format() {}
+
+func (*SubstitutionFormatString_JsonFormat) isSubstitutionFormatString_Format() {}
+
+func (*SubstitutionFormatString_TextFormatSource) isSubstitutionFormatString_Format() {}
+
+var File_envoy_config_core_v3_substitution_format_string_proto protoreflect.FileDescriptor
+
+var file_envoy_config_core_v3_substitution_format_string_proto_rawDesc = []byte{
+ 0x0a, 0x35, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65,
+ 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72,
+ 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
+ 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22,
+ 0x99, 0x03, 0x0a, 0x18, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x69, 0x6f, 0x6e,
+ 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x2e, 0x0a, 0x0b,
+ 0x74, 0x65, 0x78, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x0b, 0x18, 0x01, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x48, 0x00,
+ 0x52, 0x0a, 0x74, 0x65, 0x78, 0x74, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x44, 0x0a, 0x0b,
+ 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a,
+ 0x01, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d,
+ 0x61, 0x74, 0x12, 0x50, 0x0a, 0x12, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61,
+ 0x74, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x48, 0x00, 0x52, 0x10, 0x74, 0x65, 0x78, 0x74, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x53, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x6d, 0x69, 0x74, 0x5f, 0x65, 0x6d, 0x70,
+ 0x74, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x0f, 0x6f, 0x6d, 0x69, 0x74, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73,
+ 0x12, 0x2e, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0xfa, 0x42, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x02,
+ 0xc8, 0x01, 0x00, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65,
+ 0x12, 0x4a, 0x0a, 0x0a, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x74, 0x65, 0x72, 0x73, 0x18, 0x06,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65,
+ 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x52, 0x0a, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x74, 0x65, 0x72, 0x73, 0x42, 0x0d, 0x0a, 0x06,
+ 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x91, 0x01, 0x0a, 0x22,
+ 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e,
+ 0x76, 0x33, 0x42, 0x1d, 0x53, 0x75, 0x62, 0x73, 0x74, 0x69, 0x74, 0x75, 0x74, 0x69, 0x6f, 0x6e,
+ 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33,
+ 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x62,
+ 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_config_core_v3_substitution_format_string_proto_rawDescOnce sync.Once
+ file_envoy_config_core_v3_substitution_format_string_proto_rawDescData = file_envoy_config_core_v3_substitution_format_string_proto_rawDesc
+)
+
+func file_envoy_config_core_v3_substitution_format_string_proto_rawDescGZIP() []byte {
+ file_envoy_config_core_v3_substitution_format_string_proto_rawDescOnce.Do(func() {
+ file_envoy_config_core_v3_substitution_format_string_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_substitution_format_string_proto_rawDescData)
+ })
+ return file_envoy_config_core_v3_substitution_format_string_proto_rawDescData
+}
+
+var file_envoy_config_core_v3_substitution_format_string_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_envoy_config_core_v3_substitution_format_string_proto_goTypes = []interface{}{
+ (*SubstitutionFormatString)(nil), // 0: envoy.config.core.v3.SubstitutionFormatString
+ (*structpb.Struct)(nil), // 1: google.protobuf.Struct
+ (*DataSource)(nil), // 2: envoy.config.core.v3.DataSource
+ (*TypedExtensionConfig)(nil), // 3: envoy.config.core.v3.TypedExtensionConfig
+}
+var file_envoy_config_core_v3_substitution_format_string_proto_depIdxs = []int32{
+ 1, // 0: envoy.config.core.v3.SubstitutionFormatString.json_format:type_name -> google.protobuf.Struct
+ 2, // 1: envoy.config.core.v3.SubstitutionFormatString.text_format_source:type_name -> envoy.config.core.v3.DataSource
+ 3, // 2: envoy.config.core.v3.SubstitutionFormatString.formatters:type_name -> envoy.config.core.v3.TypedExtensionConfig
+ 3, // [3:3] is the sub-list for method output_type
+ 3, // [3:3] is the sub-list for method input_type
+ 3, // [3:3] is the sub-list for extension type_name
+ 3, // [3:3] is the sub-list for extension extendee
+ 0, // [0:3] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_core_v3_substitution_format_string_proto_init() }
+func file_envoy_config_core_v3_substitution_format_string_proto_init() {
+ if File_envoy_config_core_v3_substitution_format_string_proto != nil {
+ return
+ }
+ file_envoy_config_core_v3_base_proto_init()
+ file_envoy_config_core_v3_extension_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_core_v3_substitution_format_string_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SubstitutionFormatString); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_envoy_config_core_v3_substitution_format_string_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*SubstitutionFormatString_TextFormat)(nil),
+ (*SubstitutionFormatString_JsonFormat)(nil),
+ (*SubstitutionFormatString_TextFormatSource)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_core_v3_substitution_format_string_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_core_v3_substitution_format_string_proto_goTypes,
+ DependencyIndexes: file_envoy_config_core_v3_substitution_format_string_proto_depIdxs,
+ MessageInfos: file_envoy_config_core_v3_substitution_format_string_proto_msgTypes,
+ }.Build()
+ File_envoy_config_core_v3_substitution_format_string_proto = out.File
+ file_envoy_config_core_v3_substitution_format_string_proto_rawDesc = nil
+ file_envoy_config_core_v3_substitution_format_string_proto_goTypes = nil
+ file_envoy_config_core_v3_substitution_format_string_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/substitution_format_string.pb.validate.go b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/substitution_format_string.pb.validate.go
new file mode 100644
index 000000000..2f3615c0b
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/substitution_format_string.pb.validate.go
@@ -0,0 +1,311 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/core/v3/substitution_format_string.proto
+
+package corev3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on SubstitutionFormatString with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *SubstitutionFormatString) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on SubstitutionFormatString with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// SubstitutionFormatStringMultiError, or nil if none found.
+func (m *SubstitutionFormatString) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *SubstitutionFormatString) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for OmitEmptyValues
+
+ if !_SubstitutionFormatString_ContentType_Pattern.MatchString(m.GetContentType()) {
+ err := SubstitutionFormatStringValidationError{
+ field: "ContentType",
+ reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ for idx, item := range m.GetFormatters() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, SubstitutionFormatStringValidationError{
+ field: fmt.Sprintf("Formatters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, SubstitutionFormatStringValidationError{
+ field: fmt.Sprintf("Formatters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return SubstitutionFormatStringValidationError{
+ field: fmt.Sprintf("Formatters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ oneofFormatPresent := false
+ switch v := m.Format.(type) {
+ case *SubstitutionFormatString_TextFormat:
+ if v == nil {
+ err := SubstitutionFormatStringValidationError{
+ field: "Format",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofFormatPresent = true
+ // no validation rules for TextFormat
+ case *SubstitutionFormatString_JsonFormat:
+ if v == nil {
+ err := SubstitutionFormatStringValidationError{
+ field: "Format",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofFormatPresent = true
+
+ if m.GetJsonFormat() == nil {
+ err := SubstitutionFormatStringValidationError{
+ field: "JsonFormat",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetJsonFormat()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, SubstitutionFormatStringValidationError{
+ field: "JsonFormat",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, SubstitutionFormatStringValidationError{
+ field: "JsonFormat",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetJsonFormat()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return SubstitutionFormatStringValidationError{
+ field: "JsonFormat",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *SubstitutionFormatString_TextFormatSource:
+ if v == nil {
+ err := SubstitutionFormatStringValidationError{
+ field: "Format",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofFormatPresent = true
+
+ if all {
+ switch v := interface{}(m.GetTextFormatSource()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, SubstitutionFormatStringValidationError{
+ field: "TextFormatSource",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, SubstitutionFormatStringValidationError{
+ field: "TextFormatSource",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetTextFormatSource()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return SubstitutionFormatStringValidationError{
+ field: "TextFormatSource",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofFormatPresent {
+ err := SubstitutionFormatStringValidationError{
+ field: "Format",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return SubstitutionFormatStringMultiError(errors)
+ }
+
+ return nil
+}
+
+// SubstitutionFormatStringMultiError is an error wrapping multiple validation
+// errors returned by SubstitutionFormatString.ValidateAll() if the designated
+// constraints aren't met.
+type SubstitutionFormatStringMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m SubstitutionFormatStringMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m SubstitutionFormatStringMultiError) AllErrors() []error { return m }
+
+// SubstitutionFormatStringValidationError is the validation error returned by
+// SubstitutionFormatString.Validate if the designated constraints aren't met.
+type SubstitutionFormatStringValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e SubstitutionFormatStringValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e SubstitutionFormatStringValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e SubstitutionFormatStringValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e SubstitutionFormatStringValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e SubstitutionFormatStringValidationError) ErrorName() string {
+ return "SubstitutionFormatStringValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e SubstitutionFormatStringValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sSubstitutionFormatString.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = SubstitutionFormatStringValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = SubstitutionFormatStringValidationError{}
+
+var _SubstitutionFormatString_ContentType_Pattern = regexp.MustCompile("^[^\x00\n\r]*$")
diff --git a/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/udp_socket_config.pb.go b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/udp_socket_config.pb.go
new file mode 100644
index 000000000..f10d69534
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/udp_socket_config.pb.go
@@ -0,0 +1,195 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.23.0
+// protoc v4.23.1
+// source: envoy/config/core/v3/udp_socket_config.proto
+
+package corev3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// Generic UDP socket configuration.
+type UdpSocketConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The maximum size of received UDP datagrams. Using a larger size will cause Envoy to allocate
+ // more memory per socket. Received datagrams above this size will be dropped. If not set
+ // defaults to 1500 bytes.
+ MaxRxDatagramSize *wrapperspb.UInt64Value `protobuf:"bytes,1,opt,name=max_rx_datagram_size,json=maxRxDatagramSize,proto3" json:"max_rx_datagram_size,omitempty"`
+ // Configures whether Generic Receive Offload (GRO)
+ // _ is preferred when reading from the
+ // UDP socket. The default is context dependent and is documented where UdpSocketConfig is used.
+ // This option affects performance but not functionality. If GRO is not supported by the operating
+ // system, non-GRO receive will be used.
+ PreferGro *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=prefer_gro,json=preferGro,proto3" json:"prefer_gro,omitempty"`
+}
+
+func (x *UdpSocketConfig) Reset() {
+ *x = UdpSocketConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_core_v3_udp_socket_config_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UdpSocketConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UdpSocketConfig) ProtoMessage() {}
+
+func (x *UdpSocketConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_core_v3_udp_socket_config_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UdpSocketConfig.ProtoReflect.Descriptor instead.
+func (*UdpSocketConfig) Descriptor() ([]byte, []int) {
+ return file_envoy_config_core_v3_udp_socket_config_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *UdpSocketConfig) GetMaxRxDatagramSize() *wrapperspb.UInt64Value {
+ if x != nil {
+ return x.MaxRxDatagramSize
+ }
+ return nil
+}
+
+func (x *UdpSocketConfig) GetPreferGro() *wrapperspb.BoolValue {
+ if x != nil {
+ return x.PreferGro
+ }
+ return nil
+}
+
+var File_envoy_config_core_v3_udp_socket_config_proto protoreflect.FileDescriptor
+
+var file_envoy_config_core_v3_udp_socket_config_proto_rawDesc = []byte{
+ 0x0a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x75, 0x64, 0x70, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65,
+ 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72,
+ 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61,
+ 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa8, 0x01, 0x0a,
+ 0x0f, 0x55, 0x64, 0x70, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x12, 0x5a, 0x0a, 0x14, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x78, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x67,
+ 0x72, 0x61, 0x6d, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0b, 0xfa, 0x42,
+ 0x08, 0x32, 0x06, 0x10, 0x80, 0x80, 0x04, 0x20, 0x00, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x52, 0x78,
+ 0x44, 0x61, 0x74, 0x61, 0x67, 0x72, 0x61, 0x6d, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x39, 0x0a, 0x0a,
+ 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x5f, 0x67, 0x72, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x70, 0x72,
+ 0x65, 0x66, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x42, 0x88, 0x01, 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x14,
+ 0x55, 0x64, 0x70, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
+ 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f,
+ 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65,
+ 0x2f, 0x76, 0x33, 0x3b, 0x63, 0x6f, 0x72, 0x65, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02,
+ 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_config_core_v3_udp_socket_config_proto_rawDescOnce sync.Once
+ file_envoy_config_core_v3_udp_socket_config_proto_rawDescData = file_envoy_config_core_v3_udp_socket_config_proto_rawDesc
+)
+
+func file_envoy_config_core_v3_udp_socket_config_proto_rawDescGZIP() []byte {
+ file_envoy_config_core_v3_udp_socket_config_proto_rawDescOnce.Do(func() {
+ file_envoy_config_core_v3_udp_socket_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_core_v3_udp_socket_config_proto_rawDescData)
+ })
+ return file_envoy_config_core_v3_udp_socket_config_proto_rawDescData
+}
+
+var file_envoy_config_core_v3_udp_socket_config_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_envoy_config_core_v3_udp_socket_config_proto_goTypes = []interface{}{
+ (*UdpSocketConfig)(nil), // 0: envoy.config.core.v3.UdpSocketConfig
+ (*wrapperspb.UInt64Value)(nil), // 1: google.protobuf.UInt64Value
+ (*wrapperspb.BoolValue)(nil), // 2: google.protobuf.BoolValue
+}
+var file_envoy_config_core_v3_udp_socket_config_proto_depIdxs = []int32{
+ 1, // 0: envoy.config.core.v3.UdpSocketConfig.max_rx_datagram_size:type_name -> google.protobuf.UInt64Value
+ 2, // 1: envoy.config.core.v3.UdpSocketConfig.prefer_gro:type_name -> google.protobuf.BoolValue
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_core_v3_udp_socket_config_proto_init() }
+func file_envoy_config_core_v3_udp_socket_config_proto_init() {
+ if File_envoy_config_core_v3_udp_socket_config_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_core_v3_udp_socket_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UdpSocketConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_core_v3_udp_socket_config_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_core_v3_udp_socket_config_proto_goTypes,
+ DependencyIndexes: file_envoy_config_core_v3_udp_socket_config_proto_depIdxs,
+ MessageInfos: file_envoy_config_core_v3_udp_socket_config_proto_msgTypes,
+ }.Build()
+ File_envoy_config_core_v3_udp_socket_config_proto = out.File
+ file_envoy_config_core_v3_udp_socket_config_proto_rawDesc = nil
+ file_envoy_config_core_v3_udp_socket_config_proto_goTypes = nil
+ file_envoy_config_core_v3_udp_socket_config_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/udp_socket_config.pb.validate.go b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/udp_socket_config.pb.validate.go
new file mode 100644
index 000000000..f0c17960d
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/config/core/v3/udp_socket_config.pb.validate.go
@@ -0,0 +1,180 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/core/v3/udp_socket_config.proto
+
+package corev3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on UdpSocketConfig with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *UdpSocketConfig) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on UdpSocketConfig with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// UdpSocketConfigMultiError, or nil if none found.
+func (m *UdpSocketConfig) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *UdpSocketConfig) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if wrapper := m.GetMaxRxDatagramSize(); wrapper != nil {
+
+ if val := wrapper.GetValue(); val <= 0 || val >= 65536 {
+ err := UdpSocketConfigValidationError{
+ field: "MaxRxDatagramSize",
+ reason: "value must be inside range (0, 65536)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+
+ if all {
+ switch v := interface{}(m.GetPreferGro()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, UdpSocketConfigValidationError{
+ field: "PreferGro",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, UdpSocketConfigValidationError{
+ field: "PreferGro",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetPreferGro()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return UdpSocketConfigValidationError{
+ field: "PreferGro",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return UdpSocketConfigMultiError(errors)
+ }
+
+ return nil
+}
+
+// UdpSocketConfigMultiError is an error wrapping multiple validation errors
+// returned by UdpSocketConfig.ValidateAll() if the designated constraints
+// aren't met.
+type UdpSocketConfigMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m UdpSocketConfigMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m UdpSocketConfigMultiError) AllErrors() []error { return m }
+
+// UdpSocketConfigValidationError is the validation error returned by
+// UdpSocketConfig.Validate if the designated constraints aren't met.
+type UdpSocketConfigValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e UdpSocketConfigValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e UdpSocketConfigValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e UdpSocketConfigValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e UdpSocketConfigValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e UdpSocketConfigValidationError) ErrorName() string { return "UdpSocketConfigValidationError" }
+
+// Error satisfies the builtin error interface
+func (e UdpSocketConfigValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sUdpSocketConfig.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = UdpSocketConfigValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = UdpSocketConfigValidationError{}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/config/route/v3/route.pb.go b/vendor/github.com/cilium/proxy/go/envoy/config/route/v3/route.pb.go
new file mode 100644
index 000000000..28d5d303f
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/config/route/v3/route.pb.go
@@ -0,0 +1,566 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.23.0
+// protoc v4.23.1
+// source: envoy/config/route/v3/route.proto
+
+package routev3
+
+import (
+ v3 "github.com/cilium/proxy/go/envoy/config/core/v3"
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+// [#next-free-field: 17]
+type RouteConfiguration struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name of the route configuration. For example, it might match
+ // :ref:`route_config_name
+ // ` in
+ // :ref:`envoy_v3_api_msg_extensions.filters.network.http_connection_manager.v3.Rds`.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // An array of virtual hosts that make up the route table.
+ VirtualHosts []*VirtualHost `protobuf:"bytes,2,rep,name=virtual_hosts,json=virtualHosts,proto3" json:"virtual_hosts,omitempty"`
+ // An array of virtual hosts will be dynamically loaded via the VHDS API.
+ // Both “virtual_hosts“ and “vhds“ fields will be used when present. “virtual_hosts“ can be used
+ // for a base routing table or for infrequently changing virtual hosts. “vhds“ is used for
+ // on-demand discovery of virtual hosts. The contents of these two fields will be merged to
+ // generate a routing table for a given RouteConfiguration, with “vhds“ derived configuration
+ // taking precedence.
+ Vhds *Vhds `protobuf:"bytes,9,opt,name=vhds,proto3" json:"vhds,omitempty"`
+ // Optionally specifies a list of HTTP headers that the connection manager
+ // will consider to be internal only. If they are found on external requests they will be cleaned
+ // prior to filter invocation. See :ref:`config_http_conn_man_headers_x-envoy-internal` for more
+ // information.
+ InternalOnlyHeaders []string `protobuf:"bytes,3,rep,name=internal_only_headers,json=internalOnlyHeaders,proto3" json:"internal_only_headers,omitempty"`
+ // Specifies a list of HTTP headers that should be added to each response that
+ // the connection manager encodes. Headers specified at this level are applied
+ // after headers from any enclosed :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` or
+ // :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. For more information, including details on
+ // header value syntax, see the documentation on :ref:`custom request headers
+ // `.
+ ResponseHeadersToAdd []*v3.HeaderValueOption `protobuf:"bytes,4,rep,name=response_headers_to_add,json=responseHeadersToAdd,proto3" json:"response_headers_to_add,omitempty"`
+ // Specifies a list of HTTP headers that should be removed from each response
+ // that the connection manager encodes.
+ ResponseHeadersToRemove []string `protobuf:"bytes,5,rep,name=response_headers_to_remove,json=responseHeadersToRemove,proto3" json:"response_headers_to_remove,omitempty"`
+ // Specifies a list of HTTP headers that should be added to each request
+ // routed by the HTTP connection manager. Headers specified at this level are
+ // applied after headers from any enclosed :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` or
+ // :ref:`envoy_v3_api_msg_config.route.v3.RouteAction`. For more information, including details on
+ // header value syntax, see the documentation on :ref:`custom request headers
+ // `.
+ RequestHeadersToAdd []*v3.HeaderValueOption `protobuf:"bytes,6,rep,name=request_headers_to_add,json=requestHeadersToAdd,proto3" json:"request_headers_to_add,omitempty"`
+ // Specifies a list of HTTP headers that should be removed from each request
+ // routed by the HTTP connection manager.
+ RequestHeadersToRemove []string `protobuf:"bytes,8,rep,name=request_headers_to_remove,json=requestHeadersToRemove,proto3" json:"request_headers_to_remove,omitempty"`
+ // By default, headers that should be added/removed are evaluated from most to least specific:
+ //
+ // * route level
+ // * virtual host level
+ // * connection manager level
+ //
+ // To allow setting overrides at the route or virtual host level, this order can be reversed
+ // by setting this option to true. Defaults to false.
+ MostSpecificHeaderMutationsWins bool `protobuf:"varint,10,opt,name=most_specific_header_mutations_wins,json=mostSpecificHeaderMutationsWins,proto3" json:"most_specific_header_mutations_wins,omitempty"`
+ // An optional boolean that specifies whether the clusters that the route
+ // table refers to will be validated by the cluster manager. If set to true
+ // and a route refers to a non-existent cluster, the route table will not
+ // load. If set to false and a route refers to a non-existent cluster, the
+ // route table will load and the router filter will return a 404 if the route
+ // is selected at runtime. This setting defaults to true if the route table
+ // is statically defined via the :ref:`route_config
+ // `
+ // option. This setting default to false if the route table is loaded dynamically via the
+ // :ref:`rds
+ // `
+ // option. Users may wish to override the default behavior in certain cases (for example when
+ // using CDS with a static route table).
+ ValidateClusters *wrapperspb.BoolValue `protobuf:"bytes,7,opt,name=validate_clusters,json=validateClusters,proto3" json:"validate_clusters,omitempty"`
+ // The maximum bytes of the response :ref:`direct response body
+ // ` size. If not specified the default
+ // is 4096.
+ //
+ // .. warning::
+ //
+ // Envoy currently holds the content of :ref:`direct response body
+ // ` in memory. Be careful setting
+ // this to be larger than the default 4KB, since the allocated memory for direct response body
+ // is not subject to data plane buffering controls.
+ MaxDirectResponseBodySizeBytes *wrapperspb.UInt32Value `protobuf:"bytes,11,opt,name=max_direct_response_body_size_bytes,json=maxDirectResponseBodySizeBytes,proto3" json:"max_direct_response_body_size_bytes,omitempty"`
+ // A list of plugins and their configurations which may be used by a
+ // :ref:`cluster specifier plugin name `
+ // within the route. All “extension.name“ fields in this list must be unique.
+ ClusterSpecifierPlugins []*ClusterSpecifierPlugin `protobuf:"bytes,12,rep,name=cluster_specifier_plugins,json=clusterSpecifierPlugins,proto3" json:"cluster_specifier_plugins,omitempty"`
+ // Specify a set of default request mirroring policies which apply to all routes under its virtual hosts.
+ // Note that policies are not merged, the most specific non-empty one becomes the mirror policies.
+ RequestMirrorPolicies []*RouteAction_RequestMirrorPolicy `protobuf:"bytes,13,rep,name=request_mirror_policies,json=requestMirrorPolicies,proto3" json:"request_mirror_policies,omitempty"`
+ // By default, port in :authority header (if any) is used in host matching.
+ // With this option enabled, Envoy will ignore the port number in the :authority header (if any) when picking VirtualHost.
+ // NOTE: this option will not strip the port number (if any) contained in route config
+ // :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost`.domains field.
+ IgnorePortInHostMatching bool `protobuf:"varint,14,opt,name=ignore_port_in_host_matching,json=ignorePortInHostMatching,proto3" json:"ignore_port_in_host_matching,omitempty"`
+ // Ignore path-parameters in path-matching.
+ // Before RFC3986, URI were like(RFC1808): :///;?#
+ // Envoy by default takes ":path" as ";".
+ // For users who want to only match path on the "" portion, this option should be true.
+ IgnorePathParametersInPathMatching bool `protobuf:"varint,15,opt,name=ignore_path_parameters_in_path_matching,json=ignorePathParametersInPathMatching,proto3" json:"ignore_path_parameters_in_path_matching,omitempty"`
+ // The typed_per_filter_config field can be used to provide RouteConfiguration level per filter config.
+ // The key should match the :ref:`filter config name
+ // `.
+ // The canonical filter name (e.g., “envoy.filters.http.buffer“ for the HTTP buffer filter) can also
+ // be used for the backwards compatibility. If there is no entry referred by the filter config name, the
+ // entry referred by the canonical filter name will be provided to the filters as fallback.
+ //
+ // Use of this field is filter specific;
+ // see the :ref:`HTTP filter documentation ` for if and how it is utilized.
+ // [#comment: An entry's value may be wrapped in a
+ // :ref:`FilterConfig`
+ // message to specify additional options.]
+ TypedPerFilterConfig map[string]*anypb.Any `protobuf:"bytes,16,rep,name=typed_per_filter_config,json=typedPerFilterConfig,proto3" json:"typed_per_filter_config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *RouteConfiguration) Reset() {
+ *x = RouteConfiguration{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_route_v3_route_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RouteConfiguration) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RouteConfiguration) ProtoMessage() {}
+
+func (x *RouteConfiguration) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_route_v3_route_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RouteConfiguration.ProtoReflect.Descriptor instead.
+func (*RouteConfiguration) Descriptor() ([]byte, []int) {
+ return file_envoy_config_route_v3_route_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *RouteConfiguration) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *RouteConfiguration) GetVirtualHosts() []*VirtualHost {
+ if x != nil {
+ return x.VirtualHosts
+ }
+ return nil
+}
+
+func (x *RouteConfiguration) GetVhds() *Vhds {
+ if x != nil {
+ return x.Vhds
+ }
+ return nil
+}
+
+func (x *RouteConfiguration) GetInternalOnlyHeaders() []string {
+ if x != nil {
+ return x.InternalOnlyHeaders
+ }
+ return nil
+}
+
+func (x *RouteConfiguration) GetResponseHeadersToAdd() []*v3.HeaderValueOption {
+ if x != nil {
+ return x.ResponseHeadersToAdd
+ }
+ return nil
+}
+
+func (x *RouteConfiguration) GetResponseHeadersToRemove() []string {
+ if x != nil {
+ return x.ResponseHeadersToRemove
+ }
+ return nil
+}
+
+func (x *RouteConfiguration) GetRequestHeadersToAdd() []*v3.HeaderValueOption {
+ if x != nil {
+ return x.RequestHeadersToAdd
+ }
+ return nil
+}
+
+func (x *RouteConfiguration) GetRequestHeadersToRemove() []string {
+ if x != nil {
+ return x.RequestHeadersToRemove
+ }
+ return nil
+}
+
+func (x *RouteConfiguration) GetMostSpecificHeaderMutationsWins() bool {
+ if x != nil {
+ return x.MostSpecificHeaderMutationsWins
+ }
+ return false
+}
+
+func (x *RouteConfiguration) GetValidateClusters() *wrapperspb.BoolValue {
+ if x != nil {
+ return x.ValidateClusters
+ }
+ return nil
+}
+
+func (x *RouteConfiguration) GetMaxDirectResponseBodySizeBytes() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.MaxDirectResponseBodySizeBytes
+ }
+ return nil
+}
+
+func (x *RouteConfiguration) GetClusterSpecifierPlugins() []*ClusterSpecifierPlugin {
+ if x != nil {
+ return x.ClusterSpecifierPlugins
+ }
+ return nil
+}
+
+func (x *RouteConfiguration) GetRequestMirrorPolicies() []*RouteAction_RequestMirrorPolicy {
+ if x != nil {
+ return x.RequestMirrorPolicies
+ }
+ return nil
+}
+
+func (x *RouteConfiguration) GetIgnorePortInHostMatching() bool {
+ if x != nil {
+ return x.IgnorePortInHostMatching
+ }
+ return false
+}
+
+func (x *RouteConfiguration) GetIgnorePathParametersInPathMatching() bool {
+ if x != nil {
+ return x.IgnorePathParametersInPathMatching
+ }
+ return false
+}
+
+func (x *RouteConfiguration) GetTypedPerFilterConfig() map[string]*anypb.Any {
+ if x != nil {
+ return x.TypedPerFilterConfig
+ }
+ return nil
+}
+
+type Vhds struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Configuration source specifier for VHDS.
+ ConfigSource *v3.ConfigSource `protobuf:"bytes,1,opt,name=config_source,json=configSource,proto3" json:"config_source,omitempty"`
+}
+
+func (x *Vhds) Reset() {
+ *x = Vhds{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_route_v3_route_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Vhds) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Vhds) ProtoMessage() {}
+
+func (x *Vhds) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_route_v3_route_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Vhds.ProtoReflect.Descriptor instead.
+func (*Vhds) Descriptor() ([]byte, []int) {
+ return file_envoy_config_route_v3_route_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Vhds) GetConfigSource() *v3.ConfigSource {
+ if x != nil {
+ return x.ConfigSource
+ }
+ return nil
+}
+
+var File_envoy_config_route_v3_route_proto protoreflect.FileDescriptor
+
+var file_envoy_config_route_v3_route_proto_rawDesc = []byte{
+ 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72,
+ 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33,
+ 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x28, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76,
+ 0x33, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x6f, 0x75,
+ 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f,
+ 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d,
+ 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75,
+ 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f,
+ 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64,
+ 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcd, 0x0b, 0x0a, 0x12, 0x52, 0x6f,
+ 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x0d, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x5f,
+ 0x68, 0x6f, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65,
+ 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x48, 0x6f, 0x73, 0x74, 0x52,
+ 0x0c, 0x76, 0x69, 0x72, 0x74, 0x75, 0x61, 0x6c, 0x48, 0x6f, 0x73, 0x74, 0x73, 0x12, 0x2f, 0x0a,
+ 0x04, 0x76, 0x68, 0x64, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65,
+ 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x68, 0x64, 0x73, 0x52, 0x04, 0x76, 0x68, 0x64, 0x73, 0x12, 0x44,
+ 0x0a, 0x15, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x5f,
+ 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x42, 0x10, 0xfa,
+ 0x42, 0x0d, 0x92, 0x01, 0x0a, 0x22, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x01, 0xc8, 0x01, 0x00, 0x52,
+ 0x13, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4f, 0x6e, 0x6c, 0x79, 0x48, 0x65, 0x61,
+ 0x64, 0x65, 0x72, 0x73, 0x12, 0x69, 0x0a, 0x17, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18,
+ 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61,
+ 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09,
+ 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10, 0xe8, 0x07, 0x52, 0x14, 0x72, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12,
+ 0x4d, 0x0a, 0x1a, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64,
+ 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x05, 0x20,
+ 0x03, 0x28, 0x09, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x92, 0x01, 0x0a, 0x22, 0x08, 0x72, 0x06, 0xc0,
+ 0x01, 0x01, 0xc8, 0x01, 0x00, 0x52, 0x17, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48,
+ 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x67,
+ 0x0a, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72,
+ 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x61, 0x64, 0x64, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x09, 0xfa, 0x42, 0x06, 0x92, 0x01, 0x03, 0x10,
+ 0xe8, 0x07, 0x52, 0x13, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65,
+ 0x72, 0x73, 0x54, 0x6f, 0x41, 0x64, 0x64, 0x12, 0x4b, 0x0a, 0x19, 0x72, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x72, 0x65,
+ 0x6d, 0x6f, 0x76, 0x65, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x42, 0x10, 0xfa, 0x42, 0x0d, 0x92,
+ 0x01, 0x0a, 0x22, 0x08, 0x72, 0x06, 0xc0, 0x01, 0x01, 0xc8, 0x01, 0x00, 0x52, 0x16, 0x72, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x54, 0x6f, 0x52, 0x65,
+ 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x4c, 0x0a, 0x23, 0x6d, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x70, 0x65,
+ 0x63, 0x69, 0x66, 0x69, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x75, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x77, 0x69, 0x6e, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x1f, 0x6d, 0x6f, 0x73, 0x74, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x48,
+ 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x57, 0x69,
+ 0x6e, 0x73, 0x12, 0x47, 0x0a, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x63,
+ 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64,
+ 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x69, 0x0a, 0x23, 0x6d,
+ 0x61, 0x78, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74,
+ 0x65, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33,
+ 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1e, 0x6d, 0x61, 0x78, 0x44, 0x69, 0x72, 0x65, 0x63,
+ 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x6f, 0x64, 0x79, 0x53, 0x69, 0x7a,
+ 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x69, 0x0a, 0x19, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
+ 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x70, 0x6c, 0x75, 0x67,
+ 0x69, 0x6e, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76,
+ 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69,
+ 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x17, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
+ 0x72, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e,
+ 0x73, 0x12, 0x6e, 0x0a, 0x17, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x6d, 0x69, 0x72,
+ 0x72, 0x6f, 0x72, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x0d, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65,
+ 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x69,
+ 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x15, 0x72, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65,
+ 0x73, 0x12, 0x3e, 0x0a, 0x1c, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74,
+ 0x5f, 0x69, 0x6e, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e,
+ 0x67, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x50,
+ 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x48, 0x6f, 0x73, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e,
+ 0x67, 0x12, 0x53, 0x0a, 0x27, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68,
+ 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x5f, 0x69, 0x6e, 0x5f, 0x70,
+ 0x61, 0x74, 0x68, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x18, 0x0f, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x22, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x50, 0x61, 0x74, 0x68, 0x50, 0x61,
+ 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x49, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61,
+ 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x7a, 0x0a, 0x17, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f,
+ 0x70, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x2e,
+ 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65,
+ 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x74, 0x79,
+ 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x1a, 0x5d, 0x0a, 0x19, 0x54, 0x79, 0x70, 0x65, 0x64, 0x50, 0x65, 0x72, 0x46, 0x69,
+ 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
+ 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65,
+ 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+ 0x01, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x73, 0x0a, 0x04, 0x56, 0x68, 0x64,
+ 0x73, 0x12, 0x51, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x08, 0xfa, 0x42,
+ 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x3a, 0x18, 0x9a, 0xc5, 0x88, 0x1e, 0x13, 0x0a, 0x11, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x68, 0x64, 0x73, 0x42, 0x81,
+ 0x01, 0x0a, 0x23, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f,
+ 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x50, 0x72, 0x6f,
+ 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f,
+ 0x76, 0x33, 0x3b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x76, 0x33, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02,
+ 0x10, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_config_route_v3_route_proto_rawDescOnce sync.Once
+ file_envoy_config_route_v3_route_proto_rawDescData = file_envoy_config_route_v3_route_proto_rawDesc
+)
+
+func file_envoy_config_route_v3_route_proto_rawDescGZIP() []byte {
+ file_envoy_config_route_v3_route_proto_rawDescOnce.Do(func() {
+ file_envoy_config_route_v3_route_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_route_v3_route_proto_rawDescData)
+ })
+ return file_envoy_config_route_v3_route_proto_rawDescData
+}
+
+var file_envoy_config_route_v3_route_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_envoy_config_route_v3_route_proto_goTypes = []interface{}{
+ (*RouteConfiguration)(nil), // 0: envoy.config.route.v3.RouteConfiguration
+ (*Vhds)(nil), // 1: envoy.config.route.v3.Vhds
+ nil, // 2: envoy.config.route.v3.RouteConfiguration.TypedPerFilterConfigEntry
+ (*VirtualHost)(nil), // 3: envoy.config.route.v3.VirtualHost
+ (*v3.HeaderValueOption)(nil), // 4: envoy.config.core.v3.HeaderValueOption
+ (*wrapperspb.BoolValue)(nil), // 5: google.protobuf.BoolValue
+ (*wrapperspb.UInt32Value)(nil), // 6: google.protobuf.UInt32Value
+ (*ClusterSpecifierPlugin)(nil), // 7: envoy.config.route.v3.ClusterSpecifierPlugin
+ (*RouteAction_RequestMirrorPolicy)(nil), // 8: envoy.config.route.v3.RouteAction.RequestMirrorPolicy
+ (*v3.ConfigSource)(nil), // 9: envoy.config.core.v3.ConfigSource
+ (*anypb.Any)(nil), // 10: google.protobuf.Any
+}
+var file_envoy_config_route_v3_route_proto_depIdxs = []int32{
+ 3, // 0: envoy.config.route.v3.RouteConfiguration.virtual_hosts:type_name -> envoy.config.route.v3.VirtualHost
+ 1, // 1: envoy.config.route.v3.RouteConfiguration.vhds:type_name -> envoy.config.route.v3.Vhds
+ 4, // 2: envoy.config.route.v3.RouteConfiguration.response_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption
+ 4, // 3: envoy.config.route.v3.RouteConfiguration.request_headers_to_add:type_name -> envoy.config.core.v3.HeaderValueOption
+ 5, // 4: envoy.config.route.v3.RouteConfiguration.validate_clusters:type_name -> google.protobuf.BoolValue
+ 6, // 5: envoy.config.route.v3.RouteConfiguration.max_direct_response_body_size_bytes:type_name -> google.protobuf.UInt32Value
+ 7, // 6: envoy.config.route.v3.RouteConfiguration.cluster_specifier_plugins:type_name -> envoy.config.route.v3.ClusterSpecifierPlugin
+ 8, // 7: envoy.config.route.v3.RouteConfiguration.request_mirror_policies:type_name -> envoy.config.route.v3.RouteAction.RequestMirrorPolicy
+ 2, // 8: envoy.config.route.v3.RouteConfiguration.typed_per_filter_config:type_name -> envoy.config.route.v3.RouteConfiguration.TypedPerFilterConfigEntry
+ 9, // 9: envoy.config.route.v3.Vhds.config_source:type_name -> envoy.config.core.v3.ConfigSource
+ 10, // 10: envoy.config.route.v3.RouteConfiguration.TypedPerFilterConfigEntry.value:type_name -> google.protobuf.Any
+ 11, // [11:11] is the sub-list for method output_type
+ 11, // [11:11] is the sub-list for method input_type
+ 11, // [11:11] is the sub-list for extension type_name
+ 11, // [11:11] is the sub-list for extension extendee
+ 0, // [0:11] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_route_v3_route_proto_init() }
+func file_envoy_config_route_v3_route_proto_init() {
+ if File_envoy_config_route_v3_route_proto != nil {
+ return
+ }
+ file_envoy_config_route_v3_route_components_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_route_v3_route_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RouteConfiguration); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_route_v3_route_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Vhds); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_route_v3_route_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 3,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_route_v3_route_proto_goTypes,
+ DependencyIndexes: file_envoy_config_route_v3_route_proto_depIdxs,
+ MessageInfos: file_envoy_config_route_v3_route_proto_msgTypes,
+ }.Build()
+ File_envoy_config_route_v3_route_proto = out.File
+ file_envoy_config_route_v3_route_proto_rawDesc = nil
+ file_envoy_config_route_v3_route_proto_goTypes = nil
+ file_envoy_config_route_v3_route_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/config/route/v3/route.pb.validate.go b/vendor/github.com/cilium/proxy/go/envoy/config/route/v3/route.pb.validate.go
new file mode 100644
index 000000000..ce8a399e8
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/config/route/v3/route.pb.validate.go
@@ -0,0 +1,663 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/route/v3/route.proto
+
+package routev3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on RouteConfiguration with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *RouteConfiguration) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on RouteConfiguration with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// RouteConfigurationMultiError, or nil if none found.
+func (m *RouteConfiguration) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *RouteConfiguration) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Name
+
+ for idx, item := range m.GetVirtualHosts() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RouteConfigurationValidationError{
+ field: fmt.Sprintf("VirtualHosts[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RouteConfigurationValidationError{
+ field: fmt.Sprintf("VirtualHosts[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RouteConfigurationValidationError{
+ field: fmt.Sprintf("VirtualHosts[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if all {
+ switch v := interface{}(m.GetVhds()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RouteConfigurationValidationError{
+ field: "Vhds",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RouteConfigurationValidationError{
+ field: "Vhds",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetVhds()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RouteConfigurationValidationError{
+ field: "Vhds",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ for idx, item := range m.GetInternalOnlyHeaders() {
+ _, _ = idx, item
+
+ if !_RouteConfiguration_InternalOnlyHeaders_Pattern.MatchString(item) {
+ err := RouteConfigurationValidationError{
+ field: fmt.Sprintf("InternalOnlyHeaders[%v]", idx),
+ reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+
+ if len(m.GetResponseHeadersToAdd()) > 1000 {
+ err := RouteConfigurationValidationError{
+ field: "ResponseHeadersToAdd",
+ reason: "value must contain no more than 1000 item(s)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ for idx, item := range m.GetResponseHeadersToAdd() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RouteConfigurationValidationError{
+ field: fmt.Sprintf("ResponseHeadersToAdd[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RouteConfigurationValidationError{
+ field: fmt.Sprintf("ResponseHeadersToAdd[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RouteConfigurationValidationError{
+ field: fmt.Sprintf("ResponseHeadersToAdd[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetResponseHeadersToRemove() {
+ _, _ = idx, item
+
+ if !_RouteConfiguration_ResponseHeadersToRemove_Pattern.MatchString(item) {
+ err := RouteConfigurationValidationError{
+ field: fmt.Sprintf("ResponseHeadersToRemove[%v]", idx),
+ reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+
+ if len(m.GetRequestHeadersToAdd()) > 1000 {
+ err := RouteConfigurationValidationError{
+ field: "RequestHeadersToAdd",
+ reason: "value must contain no more than 1000 item(s)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ for idx, item := range m.GetRequestHeadersToAdd() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RouteConfigurationValidationError{
+ field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RouteConfigurationValidationError{
+ field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RouteConfigurationValidationError{
+ field: fmt.Sprintf("RequestHeadersToAdd[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetRequestHeadersToRemove() {
+ _, _ = idx, item
+
+ if !_RouteConfiguration_RequestHeadersToRemove_Pattern.MatchString(item) {
+ err := RouteConfigurationValidationError{
+ field: fmt.Sprintf("RequestHeadersToRemove[%v]", idx),
+ reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+
+ // no validation rules for MostSpecificHeaderMutationsWins
+
+ if all {
+ switch v := interface{}(m.GetValidateClusters()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RouteConfigurationValidationError{
+ field: "ValidateClusters",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RouteConfigurationValidationError{
+ field: "ValidateClusters",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetValidateClusters()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RouteConfigurationValidationError{
+ field: "ValidateClusters",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetMaxDirectResponseBodySizeBytes()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RouteConfigurationValidationError{
+ field: "MaxDirectResponseBodySizeBytes",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RouteConfigurationValidationError{
+ field: "MaxDirectResponseBodySizeBytes",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMaxDirectResponseBodySizeBytes()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RouteConfigurationValidationError{
+ field: "MaxDirectResponseBodySizeBytes",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ for idx, item := range m.GetClusterSpecifierPlugins() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RouteConfigurationValidationError{
+ field: fmt.Sprintf("ClusterSpecifierPlugins[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RouteConfigurationValidationError{
+ field: fmt.Sprintf("ClusterSpecifierPlugins[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RouteConfigurationValidationError{
+ field: fmt.Sprintf("ClusterSpecifierPlugins[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetRequestMirrorPolicies() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RouteConfigurationValidationError{
+ field: fmt.Sprintf("RequestMirrorPolicies[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RouteConfigurationValidationError{
+ field: fmt.Sprintf("RequestMirrorPolicies[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RouteConfigurationValidationError{
+ field: fmt.Sprintf("RequestMirrorPolicies[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ // no validation rules for IgnorePortInHostMatching
+
+ // no validation rules for IgnorePathParametersInPathMatching
+
+ {
+ sorted_keys := make([]string, len(m.GetTypedPerFilterConfig()))
+ i := 0
+ for key := range m.GetTypedPerFilterConfig() {
+ sorted_keys[i] = key
+ i++
+ }
+ sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] })
+ for _, key := range sorted_keys {
+ val := m.GetTypedPerFilterConfig()[key]
+ _ = val
+
+ // no validation rules for TypedPerFilterConfig[key]
+
+ if all {
+ switch v := interface{}(val).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RouteConfigurationValidationError{
+ field: fmt.Sprintf("TypedPerFilterConfig[%v]", key),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RouteConfigurationValidationError{
+ field: fmt.Sprintf("TypedPerFilterConfig[%v]", key),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(val).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RouteConfigurationValidationError{
+ field: fmt.Sprintf("TypedPerFilterConfig[%v]", key),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+ }
+
+ if len(errors) > 0 {
+ return RouteConfigurationMultiError(errors)
+ }
+
+ return nil
+}
+
+// RouteConfigurationMultiError is an error wrapping multiple validation errors
+// returned by RouteConfiguration.ValidateAll() if the designated constraints
+// aren't met.
+type RouteConfigurationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m RouteConfigurationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m RouteConfigurationMultiError) AllErrors() []error { return m }
+
+// RouteConfigurationValidationError is the validation error returned by
+// RouteConfiguration.Validate if the designated constraints aren't met.
+type RouteConfigurationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RouteConfigurationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RouteConfigurationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RouteConfigurationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RouteConfigurationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RouteConfigurationValidationError) ErrorName() string {
+ return "RouteConfigurationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e RouteConfigurationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRouteConfiguration.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RouteConfigurationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RouteConfigurationValidationError{}
+
+var _RouteConfiguration_InternalOnlyHeaders_Pattern = regexp.MustCompile("^[^\x00\n\r]*$")
+
+var _RouteConfiguration_ResponseHeadersToRemove_Pattern = regexp.MustCompile("^[^\x00\n\r]*$")
+
+var _RouteConfiguration_RequestHeadersToRemove_Pattern = regexp.MustCompile("^[^\x00\n\r]*$")
+
+// Validate checks the field values on Vhds with the rules defined in the proto
+// definition for this message. If any rules are violated, the first error
+// encountered is returned, or nil if there are no violations.
+func (m *Vhds) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Vhds with the rules defined in the
+// proto definition for this message. If any rules are violated, the result is
+// a list of violation errors wrapped in VhdsMultiError, or nil if none found.
+func (m *Vhds) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Vhds) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if m.GetConfigSource() == nil {
+ err := VhdsValidationError{
+ field: "ConfigSource",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetConfigSource()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, VhdsValidationError{
+ field: "ConfigSource",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, VhdsValidationError{
+ field: "ConfigSource",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetConfigSource()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return VhdsValidationError{
+ field: "ConfigSource",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return VhdsMultiError(errors)
+ }
+
+ return nil
+}
+
+// VhdsMultiError is an error wrapping multiple validation errors returned by
+// Vhds.ValidateAll() if the designated constraints aren't met.
+type VhdsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m VhdsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m VhdsMultiError) AllErrors() []error { return m }
+
+// VhdsValidationError is the validation error returned by Vhds.Validate if the
+// designated constraints aren't met.
+type VhdsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e VhdsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e VhdsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e VhdsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e VhdsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e VhdsValidationError) ErrorName() string { return "VhdsValidationError" }
+
+// Error satisfies the builtin error interface
+func (e VhdsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sVhds.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = VhdsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = VhdsValidationError{}
diff --git a/vendor/github.com/cilium/proxy/go/envoy/config/route/v3/route_components.pb.go b/vendor/github.com/cilium/proxy/go/envoy/config/route/v3/route_components.pb.go
new file mode 100644
index 000000000..fe5f7aa4c
--- /dev/null
+++ b/vendor/github.com/cilium/proxy/go/envoy/config/route/v3/route_components.pb.go
@@ -0,0 +1,9015 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.23.0
+// protoc v4.23.1
+// source: envoy/config/route/v3/route_components.proto
+
+package routev3
+
+import (
+ _ "github.com/cilium/proxy/go/envoy/annotations"
+ v31 "github.com/cilium/proxy/go/envoy/config/core/v3"
+ v32 "github.com/cilium/proxy/go/envoy/type/matcher/v3"
+ v35 "github.com/cilium/proxy/go/envoy/type/metadata/v3"
+ v34 "github.com/cilium/proxy/go/envoy/type/tracing/v3"
+ v33 "github.com/cilium/proxy/go/envoy/type/v3"
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/cncf/xds/go/xds/annotations/v3"
+ v3 "github.com/cncf/xds/go/xds/type/matcher/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ proto "github.com/golang/protobuf/proto"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// This is a compile-time assertion that a sufficiently up-to-date version
+// of the legacy proto package is being used.
+const _ = proto.ProtoPackageIsVersion4
+
+type VirtualHost_TlsRequirementType int32
+
+const (
+ // No TLS requirement for the virtual host.
+ VirtualHost_NONE VirtualHost_TlsRequirementType = 0
+ // External requests must use TLS. If a request is external and it is not
+ // using TLS, a 301 redirect will be sent telling the client to use HTTPS.
+ VirtualHost_EXTERNAL_ONLY VirtualHost_TlsRequirementType = 1
+ // All requests must use TLS. If a request is not using TLS, a 301 redirect
+ // will be sent telling the client to use HTTPS.
+ VirtualHost_ALL VirtualHost_TlsRequirementType = 2
+)
+
+// Enum value maps for VirtualHost_TlsRequirementType.
+var (
+ VirtualHost_TlsRequirementType_name = map[int32]string{
+ 0: "NONE",
+ 1: "EXTERNAL_ONLY",
+ 2: "ALL",
+ }
+ VirtualHost_TlsRequirementType_value = map[string]int32{
+ "NONE": 0,
+ "EXTERNAL_ONLY": 1,
+ "ALL": 2,
+ }
+)
+
+func (x VirtualHost_TlsRequirementType) Enum() *VirtualHost_TlsRequirementType {
+ p := new(VirtualHost_TlsRequirementType)
+ *p = x
+ return p
+}
+
+func (x VirtualHost_TlsRequirementType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (VirtualHost_TlsRequirementType) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_route_v3_route_components_proto_enumTypes[0].Descriptor()
+}
+
+func (VirtualHost_TlsRequirementType) Type() protoreflect.EnumType {
+ return &file_envoy_config_route_v3_route_components_proto_enumTypes[0]
+}
+
+func (x VirtualHost_TlsRequirementType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use VirtualHost_TlsRequirementType.Descriptor instead.
+func (VirtualHost_TlsRequirementType) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{0, 0}
+}
+
+type RouteAction_ClusterNotFoundResponseCode int32
+
+const (
+ // HTTP status code - 503 Service Unavailable.
+ RouteAction_SERVICE_UNAVAILABLE RouteAction_ClusterNotFoundResponseCode = 0
+ // HTTP status code - 404 Not Found.
+ RouteAction_NOT_FOUND RouteAction_ClusterNotFoundResponseCode = 1
+ // HTTP status code - 500 Internal Server Error.
+ RouteAction_INTERNAL_SERVER_ERROR RouteAction_ClusterNotFoundResponseCode = 2
+)
+
+// Enum value maps for RouteAction_ClusterNotFoundResponseCode.
+var (
+ RouteAction_ClusterNotFoundResponseCode_name = map[int32]string{
+ 0: "SERVICE_UNAVAILABLE",
+ 1: "NOT_FOUND",
+ 2: "INTERNAL_SERVER_ERROR",
+ }
+ RouteAction_ClusterNotFoundResponseCode_value = map[string]int32{
+ "SERVICE_UNAVAILABLE": 0,
+ "NOT_FOUND": 1,
+ "INTERNAL_SERVER_ERROR": 2,
+ }
+)
+
+func (x RouteAction_ClusterNotFoundResponseCode) Enum() *RouteAction_ClusterNotFoundResponseCode {
+ p := new(RouteAction_ClusterNotFoundResponseCode)
+ *p = x
+ return p
+}
+
+func (x RouteAction_ClusterNotFoundResponseCode) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (RouteAction_ClusterNotFoundResponseCode) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_route_v3_route_components_proto_enumTypes[1].Descriptor()
+}
+
+func (RouteAction_ClusterNotFoundResponseCode) Type() protoreflect.EnumType {
+ return &file_envoy_config_route_v3_route_components_proto_enumTypes[1]
+}
+
+func (x RouteAction_ClusterNotFoundResponseCode) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use RouteAction_ClusterNotFoundResponseCode.Descriptor instead.
+func (RouteAction_ClusterNotFoundResponseCode) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 0}
+}
+
+// Configures :ref:`internal redirect ` behavior.
+// [#next-major-version: remove this definition - it's defined in the InternalRedirectPolicy message.]
+//
+// Deprecated: Do not use.
+type RouteAction_InternalRedirectAction int32
+
+const (
+ RouteAction_PASS_THROUGH_INTERNAL_REDIRECT RouteAction_InternalRedirectAction = 0
+ RouteAction_HANDLE_INTERNAL_REDIRECT RouteAction_InternalRedirectAction = 1
+)
+
+// Enum value maps for RouteAction_InternalRedirectAction.
+var (
+ RouteAction_InternalRedirectAction_name = map[int32]string{
+ 0: "PASS_THROUGH_INTERNAL_REDIRECT",
+ 1: "HANDLE_INTERNAL_REDIRECT",
+ }
+ RouteAction_InternalRedirectAction_value = map[string]int32{
+ "PASS_THROUGH_INTERNAL_REDIRECT": 0,
+ "HANDLE_INTERNAL_REDIRECT": 1,
+ }
+)
+
+func (x RouteAction_InternalRedirectAction) Enum() *RouteAction_InternalRedirectAction {
+ p := new(RouteAction_InternalRedirectAction)
+ *p = x
+ return p
+}
+
+func (x RouteAction_InternalRedirectAction) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (RouteAction_InternalRedirectAction) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_route_v3_route_components_proto_enumTypes[2].Descriptor()
+}
+
+func (RouteAction_InternalRedirectAction) Type() protoreflect.EnumType {
+ return &file_envoy_config_route_v3_route_components_proto_enumTypes[2]
+}
+
+func (x RouteAction_InternalRedirectAction) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use RouteAction_InternalRedirectAction.Descriptor instead.
+func (RouteAction_InternalRedirectAction) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{8, 1}
+}
+
+type RetryPolicy_ResetHeaderFormat int32
+
+const (
+ RetryPolicy_SECONDS RetryPolicy_ResetHeaderFormat = 0
+ RetryPolicy_UNIX_TIMESTAMP RetryPolicy_ResetHeaderFormat = 1
+)
+
+// Enum value maps for RetryPolicy_ResetHeaderFormat.
+var (
+ RetryPolicy_ResetHeaderFormat_name = map[int32]string{
+ 0: "SECONDS",
+ 1: "UNIX_TIMESTAMP",
+ }
+ RetryPolicy_ResetHeaderFormat_value = map[string]int32{
+ "SECONDS": 0,
+ "UNIX_TIMESTAMP": 1,
+ }
+)
+
+func (x RetryPolicy_ResetHeaderFormat) Enum() *RetryPolicy_ResetHeaderFormat {
+ p := new(RetryPolicy_ResetHeaderFormat)
+ *p = x
+ return p
+}
+
+func (x RetryPolicy_ResetHeaderFormat) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (RetryPolicy_ResetHeaderFormat) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_route_v3_route_components_proto_enumTypes[3].Descriptor()
+}
+
+func (RetryPolicy_ResetHeaderFormat) Type() protoreflect.EnumType {
+ return &file_envoy_config_route_v3_route_components_proto_enumTypes[3]
+}
+
+func (x RetryPolicy_ResetHeaderFormat) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use RetryPolicy_ResetHeaderFormat.Descriptor instead.
+func (RetryPolicy_ResetHeaderFormat) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{9, 0}
+}
+
+type RedirectAction_RedirectResponseCode int32
+
+const (
+ // Moved Permanently HTTP Status Code - 301.
+ RedirectAction_MOVED_PERMANENTLY RedirectAction_RedirectResponseCode = 0
+ // Found HTTP Status Code - 302.
+ RedirectAction_FOUND RedirectAction_RedirectResponseCode = 1
+ // See Other HTTP Status Code - 303.
+ RedirectAction_SEE_OTHER RedirectAction_RedirectResponseCode = 2
+ // Temporary Redirect HTTP Status Code - 307.
+ RedirectAction_TEMPORARY_REDIRECT RedirectAction_RedirectResponseCode = 3
+ // Permanent Redirect HTTP Status Code - 308.
+ RedirectAction_PERMANENT_REDIRECT RedirectAction_RedirectResponseCode = 4
+)
+
+// Enum value maps for RedirectAction_RedirectResponseCode.
+var (
+ RedirectAction_RedirectResponseCode_name = map[int32]string{
+ 0: "MOVED_PERMANENTLY",
+ 1: "FOUND",
+ 2: "SEE_OTHER",
+ 3: "TEMPORARY_REDIRECT",
+ 4: "PERMANENT_REDIRECT",
+ }
+ RedirectAction_RedirectResponseCode_value = map[string]int32{
+ "MOVED_PERMANENTLY": 0,
+ "FOUND": 1,
+ "SEE_OTHER": 2,
+ "TEMPORARY_REDIRECT": 3,
+ "PERMANENT_REDIRECT": 4,
+ }
+)
+
+func (x RedirectAction_RedirectResponseCode) Enum() *RedirectAction_RedirectResponseCode {
+ p := new(RedirectAction_RedirectResponseCode)
+ *p = x
+ return p
+}
+
+func (x RedirectAction_RedirectResponseCode) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (RedirectAction_RedirectResponseCode) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_route_v3_route_components_proto_enumTypes[4].Descriptor()
+}
+
+func (RedirectAction_RedirectResponseCode) Type() protoreflect.EnumType {
+ return &file_envoy_config_route_v3_route_components_proto_enumTypes[4]
+}
+
+func (x RedirectAction_RedirectResponseCode) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use RedirectAction_RedirectResponseCode.Descriptor instead.
+func (RedirectAction_RedirectResponseCode) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{11, 0}
+}
+
+type RateLimit_Action_MetaData_Source int32
+
+const (
+ // Query :ref:`dynamic metadata `
+ RateLimit_Action_MetaData_DYNAMIC RateLimit_Action_MetaData_Source = 0
+ // Query :ref:`route entry metadata `
+ RateLimit_Action_MetaData_ROUTE_ENTRY RateLimit_Action_MetaData_Source = 1
+)
+
+// Enum value maps for RateLimit_Action_MetaData_Source.
+var (
+ RateLimit_Action_MetaData_Source_name = map[int32]string{
+ 0: "DYNAMIC",
+ 1: "ROUTE_ENTRY",
+ }
+ RateLimit_Action_MetaData_Source_value = map[string]int32{
+ "DYNAMIC": 0,
+ "ROUTE_ENTRY": 1,
+ }
+)
+
+func (x RateLimit_Action_MetaData_Source) Enum() *RateLimit_Action_MetaData_Source {
+ p := new(RateLimit_Action_MetaData_Source)
+ *p = x
+ return p
+}
+
+func (x RateLimit_Action_MetaData_Source) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (RateLimit_Action_MetaData_Source) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_route_v3_route_components_proto_enumTypes[5].Descriptor()
+}
+
+func (RateLimit_Action_MetaData_Source) Type() protoreflect.EnumType {
+ return &file_envoy_config_route_v3_route_components_proto_enumTypes[5]
+}
+
+func (x RateLimit_Action_MetaData_Source) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use RateLimit_Action_MetaData_Source.Descriptor instead.
+func (RateLimit_Action_MetaData_Source) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{17, 0, 8, 0}
+}
+
+// The top level element in the routing configuration is a virtual host. Each virtual host has
+// a logical name as well as a set of domains that get routed to it based on the incoming request's
+// host header. This allows a single listener to service multiple top level domain path trees. Once
+// a virtual host is selected based on the domain, the routes are processed in order to see which
+// upstream cluster to route to or whether to perform a redirect.
+// [#next-free-field: 24]
+type VirtualHost struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The logical name of the virtual host. This is used when emitting certain
+ // statistics but is not relevant for routing.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // A list of domains (host/authority header) that will be matched to this
+ // virtual host. Wildcard hosts are supported in the suffix or prefix form.
+ //
+ // Domain search order:
+ // 1. Exact domain names: “www.foo.com“.
+ // 2. Suffix domain wildcards: “*.foo.com“ or “*-bar.foo.com“.
+ // 3. Prefix domain wildcards: “foo.*“ or “foo-*“.
+ // 4. Special wildcard “*“ matching any domain.
+ //
+ // .. note::
+ //
+ // The wildcard will not match the empty string.
+ // e.g. ``*-bar.foo.com`` will match ``baz-bar.foo.com`` but not ``-bar.foo.com``.
+ // The longest wildcards match first.
+ // Only a single virtual host in the entire route configuration can match on ``*``. A domain
+ // must be unique across all virtual hosts or the config will fail to load.
+ //
+ // Domains cannot contain control characters. This is validated by the well_known_regex HTTP_HEADER_VALUE.
+ Domains []string `protobuf:"bytes,2,rep,name=domains,proto3" json:"domains,omitempty"`
+ // The list of routes that will be matched, in order, for incoming requests.
+ // The first route that matches will be used.
+ // Only one of this and “matcher“ can be specified.
+ Routes []*Route `protobuf:"bytes,3,rep,name=routes,proto3" json:"routes,omitempty"`
+ // [#next-major-version: This should be included in a oneof with routes wrapped in a message.]
+ // The match tree to use when resolving route actions for incoming requests. Only one of this and “routes“
+ // can be specified.
+ Matcher *v3.Matcher `protobuf:"bytes,21,opt,name=matcher,proto3" json:"matcher,omitempty"`
+ // Specifies the type of TLS enforcement the virtual host expects. If this option is not
+ // specified, there is no TLS requirement for the virtual host.
+ RequireTls VirtualHost_TlsRequirementType `protobuf:"varint,4,opt,name=require_tls,json=requireTls,proto3,enum=envoy.config.route.v3.VirtualHost_TlsRequirementType" json:"require_tls,omitempty"`
+ // A list of virtual clusters defined for this virtual host. Virtual clusters
+ // are used for additional statistics gathering.
+ VirtualClusters []*VirtualCluster `protobuf:"bytes,5,rep,name=virtual_clusters,json=virtualClusters,proto3" json:"virtual_clusters,omitempty"`
+ // Specifies a set of rate limit configurations that will be applied to the
+ // virtual host.
+ RateLimits []*RateLimit `protobuf:"bytes,6,rep,name=rate_limits,json=rateLimits,proto3" json:"rate_limits,omitempty"`
+ // Specifies a list of HTTP headers that should be added to each request
+ // handled by this virtual host. Headers specified at this level are applied
+ // after headers from enclosed :ref:`envoy_v3_api_msg_config.route.v3.Route` and before headers from the
+ // enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including
+ // details on header value syntax, see the documentation on :ref:`custom request headers
+ // `.
+ RequestHeadersToAdd []*v31.HeaderValueOption `protobuf:"bytes,7,rep,name=request_headers_to_add,json=requestHeadersToAdd,proto3" json:"request_headers_to_add,omitempty"`
+ // Specifies a list of HTTP headers that should be removed from each request
+ // handled by this virtual host.
+ RequestHeadersToRemove []string `protobuf:"bytes,13,rep,name=request_headers_to_remove,json=requestHeadersToRemove,proto3" json:"request_headers_to_remove,omitempty"`
+ // Specifies a list of HTTP headers that should be added to each response
+ // handled by this virtual host. Headers specified at this level are applied
+ // after headers from enclosed :ref:`envoy_v3_api_msg_config.route.v3.Route` and before headers from the
+ // enclosing :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including
+ // details on header value syntax, see the documentation on :ref:`custom request headers
+ // `.
+ ResponseHeadersToAdd []*v31.HeaderValueOption `protobuf:"bytes,10,rep,name=response_headers_to_add,json=responseHeadersToAdd,proto3" json:"response_headers_to_add,omitempty"`
+ // Specifies a list of HTTP headers that should be removed from each response
+ // handled by this virtual host.
+ ResponseHeadersToRemove []string `protobuf:"bytes,11,rep,name=response_headers_to_remove,json=responseHeadersToRemove,proto3" json:"response_headers_to_remove,omitempty"`
+ // Indicates that the virtual host has a CORS policy. This field is ignored if related cors policy is
+ // found in the
+ // :ref:`VirtualHost.typed_per_filter_config`.
+ //
+ // .. attention::
+ //
+ // This option has been deprecated. Please use
+ // :ref:`VirtualHost.typed_per_filter_config`
+ // to configure the CORS HTTP filter.
+ //
+ // Deprecated: Do not use.
+ Cors *CorsPolicy `protobuf:"bytes,8,opt,name=cors,proto3" json:"cors,omitempty"`
+ // The per_filter_config field can be used to provide virtual host-specific configurations for filters.
+ // The key should match the :ref:`filter config name
+ // `.
+ // The canonical filter name (e.g., “envoy.filters.http.buffer“ for the HTTP buffer filter) can also
+ // be used for the backwards compatibility. If there is no entry referred by the filter config name, the
+ // entry referred by the canonical filter name will be provided to the filters as fallback.
+ //
+ // Use of this field is filter specific;
+ // see the :ref:`HTTP filter documentation ` for if and how it is utilized.
+ // [#comment: An entry's value may be wrapped in a
+ // :ref:`FilterConfig`
+ // message to specify additional options.]
+ TypedPerFilterConfig map[string]*anypb.Any `protobuf:"bytes,15,rep,name=typed_per_filter_config,json=typedPerFilterConfig,proto3" json:"typed_per_filter_config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // Decides whether the :ref:`x-envoy-attempt-count
+ // ` header should be included
+ // in the upstream request. Setting this option will cause it to override any existing header
+ // value, so in the case of two Envoys on the request path with this option enabled, the upstream
+ // will see the attempt count as perceived by the second Envoy. Defaults to false.
+ // This header is unaffected by the
+ // :ref:`suppress_envoy_headers
+ // ` flag.
+ //
+ // [#next-major-version: rename to include_attempt_count_in_request.]
+ IncludeRequestAttemptCount bool `protobuf:"varint,14,opt,name=include_request_attempt_count,json=includeRequestAttemptCount,proto3" json:"include_request_attempt_count,omitempty"`
+ // Decides whether the :ref:`x-envoy-attempt-count
+ // ` header should be included
+ // in the downstream response. Setting this option will cause the router to override any existing header
+ // value, so in the case of two Envoys on the request path with this option enabled, the downstream
+ // will see the attempt count as perceived by the Envoy closest upstream from itself. Defaults to false.
+ // This header is unaffected by the
+ // :ref:`suppress_envoy_headers
+ // ` flag.
+ IncludeAttemptCountInResponse bool `protobuf:"varint,19,opt,name=include_attempt_count_in_response,json=includeAttemptCountInResponse,proto3" json:"include_attempt_count_in_response,omitempty"`
+ // Indicates the retry policy for all routes in this virtual host. Note that setting a
+ // route level entry will take precedence over this config and it'll be treated
+ // independently (e.g.: values are not inherited).
+ RetryPolicy *RetryPolicy `protobuf:"bytes,16,opt,name=retry_policy,json=retryPolicy,proto3" json:"retry_policy,omitempty"`
+ // [#not-implemented-hide:]
+ // Specifies the configuration for retry policy extension. Note that setting a route level entry
+ // will take precedence over this config and it'll be treated independently (e.g.: values are not
+ // inherited). :ref:`Retry policy ` should not be
+ // set if this field is used.
+ RetryPolicyTypedConfig *anypb.Any `protobuf:"bytes,20,opt,name=retry_policy_typed_config,json=retryPolicyTypedConfig,proto3" json:"retry_policy_typed_config,omitempty"`
+ // Indicates the hedge policy for all routes in this virtual host. Note that setting a
+ // route level entry will take precedence over this config and it'll be treated
+ // independently (e.g.: values are not inherited).
+ HedgePolicy *HedgePolicy `protobuf:"bytes,17,opt,name=hedge_policy,json=hedgePolicy,proto3" json:"hedge_policy,omitempty"`
+ // Decides whether to include the :ref:`x-envoy-is-timeout-retry `
+ // request header in retries initiated by per try timeouts.
+ IncludeIsTimeoutRetryHeader bool `protobuf:"varint,23,opt,name=include_is_timeout_retry_header,json=includeIsTimeoutRetryHeader,proto3" json:"include_is_timeout_retry_header,omitempty"`
+ // The maximum bytes which will be buffered for retries and shadowing.
+ // If set and a route-specific limit is not set, the bytes actually buffered will be the minimum
+ // value of this and the listener per_connection_buffer_limit_bytes.
+ PerRequestBufferLimitBytes *wrapperspb.UInt32Value `protobuf:"bytes,18,opt,name=per_request_buffer_limit_bytes,json=perRequestBufferLimitBytes,proto3" json:"per_request_buffer_limit_bytes,omitempty"`
+ // Specify a set of default request mirroring policies for every route under this virtual host.
+ // It takes precedence over the route config mirror policy entirely.
+ // That is, policies are not merged, the most specific non-empty one becomes the mirror policies.
+ RequestMirrorPolicies []*RouteAction_RequestMirrorPolicy `protobuf:"bytes,22,rep,name=request_mirror_policies,json=requestMirrorPolicies,proto3" json:"request_mirror_policies,omitempty"`
+}
+
+func (x *VirtualHost) Reset() {
+ *x = VirtualHost{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VirtualHost) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VirtualHost) ProtoMessage() {}
+
+func (x *VirtualHost) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VirtualHost.ProtoReflect.Descriptor instead.
+func (*VirtualHost) Descriptor() ([]byte, []int) {
+ return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *VirtualHost) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *VirtualHost) GetDomains() []string {
+ if x != nil {
+ return x.Domains
+ }
+ return nil
+}
+
+func (x *VirtualHost) GetRoutes() []*Route {
+ if x != nil {
+ return x.Routes
+ }
+ return nil
+}
+
+func (x *VirtualHost) GetMatcher() *v3.Matcher {
+ if x != nil {
+ return x.Matcher
+ }
+ return nil
+}
+
+func (x *VirtualHost) GetRequireTls() VirtualHost_TlsRequirementType {
+ if x != nil {
+ return x.RequireTls
+ }
+ return VirtualHost_NONE
+}
+
+func (x *VirtualHost) GetVirtualClusters() []*VirtualCluster {
+ if x != nil {
+ return x.VirtualClusters
+ }
+ return nil
+}
+
+func (x *VirtualHost) GetRateLimits() []*RateLimit {
+ if x != nil {
+ return x.RateLimits
+ }
+ return nil
+}
+
+func (x *VirtualHost) GetRequestHeadersToAdd() []*v31.HeaderValueOption {
+ if x != nil {
+ return x.RequestHeadersToAdd
+ }
+ return nil
+}
+
+func (x *VirtualHost) GetRequestHeadersToRemove() []string {
+ if x != nil {
+ return x.RequestHeadersToRemove
+ }
+ return nil
+}
+
+func (x *VirtualHost) GetResponseHeadersToAdd() []*v31.HeaderValueOption {
+ if x != nil {
+ return x.ResponseHeadersToAdd
+ }
+ return nil
+}
+
+func (x *VirtualHost) GetResponseHeadersToRemove() []string {
+ if x != nil {
+ return x.ResponseHeadersToRemove
+ }
+ return nil
+}
+
+// Deprecated: Do not use.
+func (x *VirtualHost) GetCors() *CorsPolicy {
+ if x != nil {
+ return x.Cors
+ }
+ return nil
+}
+
+func (x *VirtualHost) GetTypedPerFilterConfig() map[string]*anypb.Any {
+ if x != nil {
+ return x.TypedPerFilterConfig
+ }
+ return nil
+}
+
+func (x *VirtualHost) GetIncludeRequestAttemptCount() bool {
+ if x != nil {
+ return x.IncludeRequestAttemptCount
+ }
+ return false
+}
+
+func (x *VirtualHost) GetIncludeAttemptCountInResponse() bool {
+ if x != nil {
+ return x.IncludeAttemptCountInResponse
+ }
+ return false
+}
+
+func (x *VirtualHost) GetRetryPolicy() *RetryPolicy {
+ if x != nil {
+ return x.RetryPolicy
+ }
+ return nil
+}
+
+func (x *VirtualHost) GetRetryPolicyTypedConfig() *anypb.Any {
+ if x != nil {
+ return x.RetryPolicyTypedConfig
+ }
+ return nil
+}
+
+func (x *VirtualHost) GetHedgePolicy() *HedgePolicy {
+ if x != nil {
+ return x.HedgePolicy
+ }
+ return nil
+}
+
+func (x *VirtualHost) GetIncludeIsTimeoutRetryHeader() bool {
+ if x != nil {
+ return x.IncludeIsTimeoutRetryHeader
+ }
+ return false
+}
+
+func (x *VirtualHost) GetPerRequestBufferLimitBytes() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.PerRequestBufferLimitBytes
+ }
+ return nil
+}
+
+func (x *VirtualHost) GetRequestMirrorPolicies() []*RouteAction_RequestMirrorPolicy {
+ if x != nil {
+ return x.RequestMirrorPolicies
+ }
+ return nil
+}
+
+// A filter-defined action type.
+type FilterAction struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Action *anypb.Any `protobuf:"bytes,1,opt,name=action,proto3" json:"action,omitempty"`
+}
+
+func (x *FilterAction) Reset() {
+ *x = FilterAction{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FilterAction) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FilterAction) ProtoMessage() {}
+
+func (x *FilterAction) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FilterAction.ProtoReflect.Descriptor instead.
+func (*FilterAction) Descriptor() ([]byte, []int) {
+ return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *FilterAction) GetAction() *anypb.Any {
+ if x != nil {
+ return x.Action
+ }
+ return nil
+}
+
+// This can be used in route matcher :ref:`VirtualHost.matcher `.
+// When the matcher matches, routes will be matched and run.
+type RouteList struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The list of routes that will be matched and run, in order. The first route that matches will be used.
+ Routes []*Route `protobuf:"bytes,1,rep,name=routes,proto3" json:"routes,omitempty"`
+}
+
+func (x *RouteList) Reset() {
+ *x = RouteList{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RouteList) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RouteList) ProtoMessage() {}
+
+func (x *RouteList) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_route_v3_route_components_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RouteList.ProtoReflect.Descriptor instead.
+func (*RouteList) Descriptor() ([]byte, []int) {
+ return file_envoy_config_route_v3_route_components_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *RouteList) GetRoutes() []*Route {
+ if x != nil {
+ return x.Routes
+ }
+ return nil
+}
+
+// A route is both a specification of how to match a request as well as an indication of what to do
+// next (e.g., redirect, forward, rewrite, etc.).
+//
+// .. attention::
+//
+// Envoy supports routing on HTTP method via :ref:`header matching
+// `.
+//
+// [#next-free-field: 20]
+type Route struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Name for the route.
+ Name string `protobuf:"bytes,14,opt,name=name,proto3" json:"name,omitempty"`
+ // Route matching parameters.
+ Match *RouteMatch `protobuf:"bytes,1,opt,name=match,proto3" json:"match,omitempty"`
+ // Types that are assignable to Action:
+ //
+ // *Route_Route
+ // *Route_Redirect
+ // *Route_DirectResponse
+ // *Route_FilterAction
+ // *Route_NonForwardingAction
+ Action isRoute_Action `protobuf_oneof:"action"`
+ // The Metadata field can be used to provide additional information
+ // about the route. It can be used for configuration, stats, and logging.
+ // The metadata should go under the filter namespace that will need it.
+ // For instance, if the metadata is intended for the Router filter,
+ // the filter name should be specified as “envoy.filters.http.router“.
+ Metadata *v31.Metadata `protobuf:"bytes,4,opt,name=metadata,proto3" json:"metadata,omitempty"`
+ // Decorator for the matched route.
+ Decorator *Decorator `protobuf:"bytes,5,opt,name=decorator,proto3" json:"decorator,omitempty"`
+ // The per_filter_config field can be used to provide route-specific configurations for filters.
+ // The key should match the :ref:`filter config name
+ // `.
+ // The canonical filter name (e.g., “envoy.filters.http.buffer“ for the HTTP buffer filter) can also
+ // be used for the backwards compatibility. If there is no entry referred by the filter config name, the
+ // entry referred by the canonical filter name will be provided to the filters as fallback.
+ //
+ // Use of this field is filter specific;
+ // see the :ref:`HTTP filter documentation ` for if and how it is utilized.
+ // [#comment: An entry's value may be wrapped in a
+ // :ref:`FilterConfig`
+ // message to specify additional options.]
+ TypedPerFilterConfig map[string]*anypb.Any `protobuf:"bytes,13,rep,name=typed_per_filter_config,json=typedPerFilterConfig,proto3" json:"typed_per_filter_config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // Specifies a set of headers that will be added to requests matching this
+ // route. Headers specified at this level are applied before headers from the
+ // enclosing :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` and
+ // :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including details on
+ // header value syntax, see the documentation on :ref:`custom request headers
+ // `.
+ RequestHeadersToAdd []*v31.HeaderValueOption `protobuf:"bytes,9,rep,name=request_headers_to_add,json=requestHeadersToAdd,proto3" json:"request_headers_to_add,omitempty"`
+ // Specifies a list of HTTP headers that should be removed from each request
+ // matching this route.
+ RequestHeadersToRemove []string `protobuf:"bytes,12,rep,name=request_headers_to_remove,json=requestHeadersToRemove,proto3" json:"request_headers_to_remove,omitempty"`
+ // Specifies a set of headers that will be added to responses to requests
+ // matching this route. Headers specified at this level are applied before
+ // headers from the enclosing :ref:`envoy_v3_api_msg_config.route.v3.VirtualHost` and
+ // :ref:`envoy_v3_api_msg_config.route.v3.RouteConfiguration`. For more information, including
+ // details on header value syntax, see the documentation on
+ // :ref:`custom request headers